2026-03-10T13:01:41.736 INFO:root:teuthology version: 1.2.4.dev6+g1c580df7a 2026-03-10T13:01:41.743 DEBUG:teuthology.report:Pushing job info to http://localhost:8080 2026-03-10T13:01:41.766 INFO:teuthology.run:Config: archive_path: /archive/kyr-2026-03-10_01:00:38-orch-squid-none-default-vps/1040 branch: squid description: orch/cephadm/upgrade/{1-start-distro/1-start-centos_9.stream 2-repo_digest/repo_digest 3-upgrade/staggered 4-wait 5-upgrade-ls agent/off mon_election/classic} email: null first_in_suite: false flavor: default job_id: '1040' last_in_suite: false machine_type: vps name: kyr-2026-03-10_01:00:38-orch-squid-none-default-vps no_nested_subset: false os_type: centos os_version: 9.stream overrides: admin_socket: branch: squid ansible.cephlab: branch: main skip_tags: nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs vars: timezone: UTC ceph: conf: global: mon election default strategy: 1 mgr: debug mgr: 20 debug ms: 1 mgr/cephadm/use_agent: false mon: debug mon: 20 debug ms: 1 debug paxos: 20 osd: debug ms: 1 debug osd: 20 osd mclock iops capacity threshold hdd: 49000 flavor: default log-ignorelist: - \(MDS_ALL_DOWN\) - \(MDS_UP_LESS_THAN_MAX\) - CEPHADM_STRAY_DAEMON - CEPHADM_FAILED_DAEMON - CEPHADM_AGENT_DOWN log-only-match: - CEPHADM_ sha1: e911bdebe5c8faa3800735d1568fcdca65db60df ceph-deploy: conf: client: log file: /var/log/ceph/ceph-$name.$pid.log mon: {} install: ceph: flavor: default sha1: e911bdebe5c8faa3800735d1568fcdca65db60df extra_system_packages: deb: - python3-xmltodict - python3-jmespath rpm: - bzip2 - perl-Test-Harness - python3-xmltodict - python3-jmespath workunit: branch: tt-squid sha1: 75a68fd8ca3f918fe9466b4c0bb385b7fc260a9b owner: kyr priority: 1000 repo: https://github.com/ceph/ceph.git roles: - - mon.a - mon.c - mgr.y - osd.0 - osd.1 - osd.2 - osd.3 - client.0 - node-exporter.a - alertmanager.a - - mon.b - mgr.x - osd.4 - osd.5 - osd.6 - osd.7 - client.1 - prometheus.a - grafana.a - node-exporter.b seed: 8043 sha1: e911bdebe5c8faa3800735d1568fcdca65db60df sleep_before_teardown: 0 subset: 1/64 suite: orch suite_branch: tt-squid suite_path: /home/teuthos/src/github.com_kshtsk_ceph_75a68fd8ca3f918fe9466b4c0bb385b7fc260a9b/qa suite_relpath: qa suite_repo: https://github.com/kshtsk/ceph.git suite_sha1: 75a68fd8ca3f918fe9466b4c0bb385b7fc260a9b targets: vm00.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBB3HElrdCqjMUa3cFZvFYqBNHrhVvMNTuNqsu1hZajDartiDRxLykYaGhmHsV3THdq553uY9DzOpvT0REcLpc9M= vm08.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBPSnTaSGBYJt4icZ+bafXgsFkEI0NOKbYJp/otvSkcNXTufnP4AhWwo/DQ89u/LHzKFeBdLrC1DKc+fegS1B3S8= tasks: - cephadm: cephadm_branch: v17.2.0 cephadm_git_url: https://github.com/ceph/ceph image: quay.io/ceph/ceph:v17.2.0 - cephadm.shell: mon.a: - ceph config set mgr mgr/cephadm/use_repo_digest false --force - cephadm.shell: env: - sha1 mon.a: - radosgw-admin realm create --rgw-realm=r --default - radosgw-admin zonegroup create --rgw-zonegroup=default --master --default - radosgw-admin zone create --rgw-zonegroup=default --rgw-zone=z --master --default - radosgw-admin period update --rgw-realm=r --commit - ceph orch apply rgw foo --realm r --zone z --placement=2 --port=8000 - ceph osd pool create foo - rbd pool init foo - ceph orch apply iscsi foo u p - sleep 180 - ceph config set mon mon_warn_on_insecure_global_id_reclaim false --force - ceph config set mon mon_warn_on_insecure_global_id_reclaim_allowed false --force - ceph 
config set global log_to_journald false --force
    - ceph orch ps
    - ceph versions
    - ceph -s
    - ceph orch ls
    - ceph orch daemon redeploy "mgr.$(ceph mgr dump -f json | jq .standbys | jq .[] | jq -r .name)" --image quay.ceph.io/ceph-ci/ceph:$sha1
    - ceph orch ps --refresh
    - sleep 180
    - ceph orch ps
    - ceph versions
    - ceph -s
    - ceph health detail
    - ceph versions | jq -e '.mgr | length == 2'
    - ceph mgr fail
    - sleep 180
    - ceph orch daemon redeploy "mgr.$(ceph mgr dump -f json | jq .standbys | jq .[] | jq -r .name)" --image quay.ceph.io/ceph-ci/ceph:$sha1
    - ceph orch ps --refresh
    - sleep 180
    - ceph orch ps
    - ceph versions
    - ceph health detail
    - ceph -s
    - ceph mgr fail
    - sleep 180
    - ceph orch ps
    - ceph versions
    - ceph -s
    - ceph health detail
    - ceph versions | jq -e '.mgr | length == 1'
    - ceph mgr fail
    - sleep 180
    - ceph orch ps
    - ceph orch ls
    - ceph versions
    - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types mgr
    - while ceph orch upgrade status | jq '.in_progress' | grep true && ! ceph orch upgrade status | jq '.message' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done
    - ceph versions | jq -e '.mgr | length == 1'
    - ceph versions | jq -e '.mgr | keys' | grep $sha1
    - ceph versions | jq -e '.overall | length == 2'
    - ceph orch upgrade check quay.ceph.io/ceph-ci/ceph:$sha1 | jq -e '.up_to_date | length == 2'
    - ceph orch upgrade status
    - ceph health detail
    - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types mon --hosts $(ceph orch ps | grep mgr.x | awk '{print $2}')
    - while ceph orch upgrade status | jq '.in_progress' | grep true && ! ceph orch upgrade status | jq '.message' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done
    - ceph orch ps
    - ceph versions | jq -e '.mon | length == 2'
    - ceph orch upgrade status
    - ceph health detail
    - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types mon --hosts $(ceph orch ps | grep mgr.y | awk '{print $2}')
    - while ceph orch upgrade status | jq '.in_progress' | grep true && ! ceph orch upgrade status | jq '.message' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done
    - ceph orch ps
    - ceph versions | jq -e '.mon | length == 1'
    - ceph versions | jq -e '.mon | keys' | grep $sha1
    - ceph orch upgrade check quay.ceph.io/ceph-ci/ceph:$sha1 | jq -e '.up_to_date | length == 5'
    - ceph orch upgrade status
    - ceph health detail
    - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types osd --limit 2
    - while ceph orch upgrade status | jq '.in_progress' | grep true && ! ceph orch upgrade status | jq '.message' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done
    - ceph orch ps
    - ceph versions | jq -e '.osd | length == 2'
    - ceph orch upgrade check quay.ceph.io/ceph-ci/ceph:$sha1 | jq -e '.up_to_date | length == 7'
    - ceph orch upgrade status
    - ceph health detail
    - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types crash,osd --limit 1
    - while ceph orch upgrade status | jq '.in_progress' | grep true && ! ceph orch upgrade status | jq '.message' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done
    - ceph orch ps
    - ceph versions | jq -e '.osd | length == 2'
    - ceph orch upgrade check quay.ceph.io/ceph-ci/ceph:$sha1 | jq -e '.up_to_date | length == 8'
    - ceph orch upgrade status
    - ceph health detail
    - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types crash,osd
    - while ceph orch upgrade status | jq '.in_progress' | grep true && ! ceph orch upgrade status | jq '.message' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done
    - ceph orch ps
    - ceph versions | jq -e '.osd | length == 1'
    - ceph versions | jq -e '.osd | keys' | grep $sha1
    - ceph orch upgrade status
    - ceph health detail
    - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --services rgw.foo
    - while ceph orch upgrade status | jq '.in_progress' | grep true && ! ceph orch upgrade status | jq '.message' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done
    - ceph orch ps
    - ceph versions | jq -e '.rgw | length == 1'
    - ceph versions | jq -e '.rgw | keys' | grep $sha1
    - ceph orch upgrade status
    - ceph health detail
    - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1
- cephadm.shell:
    env:
    - sha1
    mon.a:
    - while ceph orch upgrade status | jq '.in_progress' | grep true && ! ceph orch upgrade status | jq '.message' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; ceph health detail ; sleep 30 ; done
    - ceph orch ps
    - ceph versions
    - echo "wait for servicemap items w/ changing names to refresh"
    - sleep 60
    - ceph orch ps
    - ceph versions
    - ceph orch upgrade status
    - ceph health detail
    - ceph versions | jq -e '.overall | length == 1'
    - ceph versions | jq -e '.overall | keys' | grep $sha1
    - ceph orch ls | grep '^osd '
- cephadm.shell:
    mon.a:
    - ceph orch upgrade ls
    - ceph orch upgrade ls --image quay.io/ceph/ceph --show-all-versions | grep 16.2.0
    - ceph orch upgrade ls --image quay.io/ceph/ceph --tags | grep v16.2.2
teuthology:
  fragments_dropped: []
  meta: {}
  postmerge: []
teuthology_branch: clyso-debian-13
teuthology_repo: https://github.com/clyso/teuthology
teuthology_sha1: 1c580df7a9c7c2aadc272da296344fd99f27c444
timestamp: 2026-03-10_01:00:38
tube: vps
user: kyr
verbose: false
worker_log: /home/teuthos/.teuthology/dispatcher/dispatcher.vps.611473
2026-03-10T13:01:41.766 INFO:teuthology.run:suite_path is set to /home/teuthos/src/github.com_kshtsk_ceph_75a68fd8ca3f918fe9466b4c0bb385b7fc260a9b/qa; will attempt to use it
2026-03-10T13:01:41.766 INFO:teuthology.run:Found tasks at /home/teuthos/src/github.com_kshtsk_ceph_75a68fd8ca3f918fe9466b4c0bb385b7fc260a9b/qa/tasks
2026-03-10T13:01:41.766 INFO:teuthology.run_tasks:Running task internal.check_packages...
2026-03-10T13:01:41.767 INFO:teuthology.task.internal:Checking packages...
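Every `ceph orch upgrade start` in the task list above is followed by the same polling idiom: loop while the upgrade reports `in_progress` and no error message has appeared, dumping daemon and version state on each pass. A minimal standalone sketch of that pattern, using only commands taken from the task YAML (the `wait_for_upgrade` function name is illustrative, not part of the job):

# Poll `ceph orch upgrade status` until the upgrade finishes or reports an error,
# exactly as the staggered-upgrade task does after each `upgrade start`.
wait_for_upgrade() {
    while ceph orch upgrade status | jq '.in_progress' | grep true &&
          ! ceph orch upgrade status | jq '.message' | grep Error; do
        ceph orch ps
        ceph versions
        ceph orch upgrade status
        sleep 30
    done
}

# Example: upgrade only the mgr daemons, then assert the outcome with jq,
# mirroring the first staggered step in the task list.
ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types mgr
wait_for_upgrade
ceph versions | jq -e '.mgr | length == 1'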
2026-03-10T13:01:41.767 INFO:teuthology.task.internal:Checking packages for os_type 'centos', flavor 'default' and ceph hash 'e911bdebe5c8faa3800735d1568fcdca65db60df' 2026-03-10T13:01:41.767 WARNING:teuthology.packaging:More than one of ref, tag, branch, or sha1 supplied; using branch 2026-03-10T13:01:41.767 INFO:teuthology.packaging:ref: None 2026-03-10T13:01:41.767 INFO:teuthology.packaging:tag: None 2026-03-10T13:01:41.767 INFO:teuthology.packaging:branch: squid 2026-03-10T13:01:41.767 INFO:teuthology.packaging:sha1: e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:01:41.767 DEBUG:teuthology.packaging:Querying https://shaman.ceph.com/api/search?status=ready&project=ceph&flavor=default&distros=centos%2F9%2Fx86_64&ref=squid 2026-03-10T13:01:42.534 INFO:teuthology.task.internal:Found packages for ceph version 19.2.3-678.ge911bdeb 2026-03-10T13:01:42.535 INFO:teuthology.run_tasks:Running task internal.buildpackages_prep... 2026-03-10T13:01:42.536 INFO:teuthology.task.internal:no buildpackages task found 2026-03-10T13:01:42.536 INFO:teuthology.run_tasks:Running task internal.save_config... 2026-03-10T13:01:42.536 INFO:teuthology.task.internal:Saving configuration 2026-03-10T13:01:42.544 INFO:teuthology.run_tasks:Running task internal.check_lock... 2026-03-10T13:01:42.545 INFO:teuthology.task.internal.check_lock:Checking locks... 2026-03-10T13:01:42.553 DEBUG:teuthology.task.internal.check_lock:machine status is {'name': 'vm00.local', 'description': '/archive/kyr-2026-03-10_01:00:38-orch-squid-none-default-vps/1040', 'up': True, 'machine_type': 'vps', 'is_vm': True, 'vm_host': {'name': 'localhost', 'description': None, 'up': True, 'machine_type': 'libvirt', 'is_vm': False, 'vm_host': None, 'os_type': None, 'os_version': None, 'arch': None, 'locked': True, 'locked_since': None, 'locked_by': None, 'mac_address': None, 'ssh_pub_key': None}, 'os_type': 'centos', 'os_version': '9.stream', 'arch': 'x86_64', 'locked': True, 'locked_since': '2026-03-10 13:00:21.868987', 'locked_by': 'kyr', 'mac_address': '52:55:00:00:00:00', 'ssh_pub_key': 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBB3HElrdCqjMUa3cFZvFYqBNHrhVvMNTuNqsu1hZajDartiDRxLykYaGhmHsV3THdq553uY9DzOpvT0REcLpc9M='} 2026-03-10T13:01:42.559 DEBUG:teuthology.task.internal.check_lock:machine status is {'name': 'vm08.local', 'description': '/archive/kyr-2026-03-10_01:00:38-orch-squid-none-default-vps/1040', 'up': True, 'machine_type': 'vps', 'is_vm': True, 'vm_host': {'name': 'localhost', 'description': None, 'up': True, 'machine_type': 'libvirt', 'is_vm': False, 'vm_host': None, 'os_type': None, 'os_version': None, 'arch': None, 'locked': True, 'locked_since': None, 'locked_by': None, 'mac_address': None, 'ssh_pub_key': None}, 'os_type': 'centos', 'os_version': '9.stream', 'arch': 'x86_64', 'locked': True, 'locked_since': '2026-03-10 13:00:21.868606', 'locked_by': 'kyr', 'mac_address': '52:55:00:00:00:08', 'ssh_pub_key': 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBPSnTaSGBYJt4icZ+bafXgsFkEI0NOKbYJp/otvSkcNXTufnP4AhWwo/DQ89u/LHzKFeBdLrC1DKc+fegS1B3S8='} 2026-03-10T13:01:42.559 INFO:teuthology.run_tasks:Running task internal.add_remotes... 
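internal.check_packages above resolves the squid branch and sha1 to a concrete build by querying shaman (the query URL is in the log, and this run found 19.2.3-678.ge911bdeb). The same lookup can be repeated by hand; treat the `.sha1` field selection below as an assumption about the response layout rather than a documented contract:

# Re-run the build search that check_packages performed (URL copied from the log).
URL='https://shaman.ceph.com/api/search?status=ready&project=ceph&flavor=default&distros=centos%2F9%2Fx86_64&ref=squid'
curl -s "$URL" | jq '.'               # pretty-print the matching builds
curl -s "$URL" | jq -r '.[0].sha1'    # assumed field: sha1 of the first ready build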
2026-03-10T13:01:42.560 INFO:teuthology.task.internal:roles: ubuntu@vm00.local - ['mon.a', 'mon.c', 'mgr.y', 'osd.0', 'osd.1', 'osd.2', 'osd.3', 'client.0', 'node-exporter.a', 'alertmanager.a'] 2026-03-10T13:01:42.560 INFO:teuthology.task.internal:roles: ubuntu@vm08.local - ['mon.b', 'mgr.x', 'osd.4', 'osd.5', 'osd.6', 'osd.7', 'client.1', 'prometheus.a', 'grafana.a', 'node-exporter.b'] 2026-03-10T13:01:42.560 INFO:teuthology.run_tasks:Running task console_log... 2026-03-10T13:01:42.567 DEBUG:teuthology.task.console_log:vm00 does not support IPMI; excluding 2026-03-10T13:01:42.574 DEBUG:teuthology.task.console_log:vm08 does not support IPMI; excluding 2026-03-10T13:01:42.574 DEBUG:teuthology.exit:Installing handler: Handler(exiter=, func=.kill_console_loggers at 0x7fc546f76170>, signals=[15]) 2026-03-10T13:01:42.574 INFO:teuthology.run_tasks:Running task internal.connect... 2026-03-10T13:01:42.575 INFO:teuthology.task.internal:Opening connections... 2026-03-10T13:01:42.575 DEBUG:teuthology.task.internal:connecting to ubuntu@vm00.local 2026-03-10T13:01:42.576 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm00.local', 'username': 'ubuntu', 'timeout': 60} 2026-03-10T13:01:42.637 DEBUG:teuthology.task.internal:connecting to ubuntu@vm08.local 2026-03-10T13:01:42.637 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm08.local', 'username': 'ubuntu', 'timeout': 60} 2026-03-10T13:01:42.696 INFO:teuthology.run_tasks:Running task internal.push_inventory... 2026-03-10T13:01:42.697 DEBUG:teuthology.orchestra.run.vm00:> uname -m 2026-03-10T13:01:42.741 INFO:teuthology.orchestra.run.vm00.stdout:x86_64 2026-03-10T13:01:42.741 DEBUG:teuthology.orchestra.run.vm00:> cat /etc/os-release 2026-03-10T13:01:42.798 INFO:teuthology.orchestra.run.vm00.stdout:NAME="CentOS Stream" 2026-03-10T13:01:42.799 INFO:teuthology.orchestra.run.vm00.stdout:VERSION="9" 2026-03-10T13:01:42.799 INFO:teuthology.orchestra.run.vm00.stdout:ID="centos" 2026-03-10T13:01:42.799 INFO:teuthology.orchestra.run.vm00.stdout:ID_LIKE="rhel fedora" 2026-03-10T13:01:42.799 INFO:teuthology.orchestra.run.vm00.stdout:VERSION_ID="9" 2026-03-10T13:01:42.799 INFO:teuthology.orchestra.run.vm00.stdout:PLATFORM_ID="platform:el9" 2026-03-10T13:01:42.799 INFO:teuthology.orchestra.run.vm00.stdout:PRETTY_NAME="CentOS Stream 9" 2026-03-10T13:01:42.799 INFO:teuthology.orchestra.run.vm00.stdout:ANSI_COLOR="0;31" 2026-03-10T13:01:42.799 INFO:teuthology.orchestra.run.vm00.stdout:LOGO="fedora-logo-icon" 2026-03-10T13:01:42.799 INFO:teuthology.orchestra.run.vm00.stdout:CPE_NAME="cpe:/o:centos:centos:9" 2026-03-10T13:01:42.799 INFO:teuthology.orchestra.run.vm00.stdout:HOME_URL="https://centos.org/" 2026-03-10T13:01:42.799 INFO:teuthology.orchestra.run.vm00.stdout:BUG_REPORT_URL="https://issues.redhat.com/" 2026-03-10T13:01:42.799 INFO:teuthology.orchestra.run.vm00.stdout:REDHAT_SUPPORT_PRODUCT="Red Hat Enterprise Linux 9" 2026-03-10T13:01:42.799 INFO:teuthology.orchestra.run.vm00.stdout:REDHAT_SUPPORT_PRODUCT_VERSION="CentOS Stream" 2026-03-10T13:01:42.799 INFO:teuthology.lock.ops:Updating vm00.local on lock server 2026-03-10T13:01:42.806 DEBUG:teuthology.orchestra.run.vm08:> uname -m 2026-03-10T13:01:42.825 INFO:teuthology.orchestra.run.vm08.stdout:x86_64 2026-03-10T13:01:42.826 DEBUG:teuthology.orchestra.run.vm08:> cat /etc/os-release 2026-03-10T13:01:42.881 INFO:teuthology.orchestra.run.vm08.stdout:NAME="CentOS Stream" 2026-03-10T13:01:42.881 INFO:teuthology.orchestra.run.vm08.stdout:VERSION="9" 2026-03-10T13:01:42.881 
INFO:teuthology.orchestra.run.vm08.stdout:ID="centos" 2026-03-10T13:01:42.881 INFO:teuthology.orchestra.run.vm08.stdout:ID_LIKE="rhel fedora" 2026-03-10T13:01:42.881 INFO:teuthology.orchestra.run.vm08.stdout:VERSION_ID="9" 2026-03-10T13:01:42.881 INFO:teuthology.orchestra.run.vm08.stdout:PLATFORM_ID="platform:el9" 2026-03-10T13:01:42.881 INFO:teuthology.orchestra.run.vm08.stdout:PRETTY_NAME="CentOS Stream 9" 2026-03-10T13:01:42.881 INFO:teuthology.orchestra.run.vm08.stdout:ANSI_COLOR="0;31" 2026-03-10T13:01:42.881 INFO:teuthology.orchestra.run.vm08.stdout:LOGO="fedora-logo-icon" 2026-03-10T13:01:42.881 INFO:teuthology.orchestra.run.vm08.stdout:CPE_NAME="cpe:/o:centos:centos:9" 2026-03-10T13:01:42.881 INFO:teuthology.orchestra.run.vm08.stdout:HOME_URL="https://centos.org/" 2026-03-10T13:01:42.881 INFO:teuthology.orchestra.run.vm08.stdout:BUG_REPORT_URL="https://issues.redhat.com/" 2026-03-10T13:01:42.882 INFO:teuthology.orchestra.run.vm08.stdout:REDHAT_SUPPORT_PRODUCT="Red Hat Enterprise Linux 9" 2026-03-10T13:01:42.882 INFO:teuthology.orchestra.run.vm08.stdout:REDHAT_SUPPORT_PRODUCT_VERSION="CentOS Stream" 2026-03-10T13:01:42.882 INFO:teuthology.lock.ops:Updating vm08.local on lock server 2026-03-10T13:01:42.887 INFO:teuthology.run_tasks:Running task internal.serialize_remote_roles... 2026-03-10T13:01:42.890 INFO:teuthology.run_tasks:Running task internal.check_conflict... 2026-03-10T13:01:42.891 INFO:teuthology.task.internal:Checking for old test directory... 2026-03-10T13:01:42.891 DEBUG:teuthology.orchestra.run.vm00:> test '!' -e /home/ubuntu/cephtest 2026-03-10T13:01:42.893 DEBUG:teuthology.orchestra.run.vm08:> test '!' -e /home/ubuntu/cephtest 2026-03-10T13:01:42.938 INFO:teuthology.run_tasks:Running task internal.check_ceph_data... 2026-03-10T13:01:42.939 INFO:teuthology.task.internal:Checking for non-empty /var/lib/ceph... 2026-03-10T13:01:42.939 DEBUG:teuthology.orchestra.run.vm00:> test -z $(ls -A /var/lib/ceph) 2026-03-10T13:01:42.949 DEBUG:teuthology.orchestra.run.vm08:> test -z $(ls -A /var/lib/ceph) 2026-03-10T13:01:42.965 INFO:teuthology.orchestra.run.vm00.stderr:ls: cannot access '/var/lib/ceph': No such file or directory 2026-03-10T13:01:42.995 INFO:teuthology.orchestra.run.vm08.stderr:ls: cannot access '/var/lib/ceph': No such file or directory 2026-03-10T13:01:42.995 INFO:teuthology.run_tasks:Running task internal.vm_setup... 2026-03-10T13:01:43.009 DEBUG:teuthology.orchestra.run.vm00:> test -e /ceph-qa-ready 2026-03-10T13:01:43.026 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-10T13:01:43.232 DEBUG:teuthology.orchestra.run.vm08:> test -e /ceph-qa-ready 2026-03-10T13:01:43.247 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-10T13:01:43.428 INFO:teuthology.run_tasks:Running task internal.base... 2026-03-10T13:01:43.429 INFO:teuthology.task.internal:Creating test directory... 2026-03-10T13:01:43.429 DEBUG:teuthology.orchestra.run.vm00:> mkdir -p -m0755 -- /home/ubuntu/cephtest 2026-03-10T13:01:43.431 DEBUG:teuthology.orchestra.run.vm08:> mkdir -p -m0755 -- /home/ubuntu/cephtest 2026-03-10T13:01:43.448 INFO:teuthology.run_tasks:Running task internal.archive_upload... 2026-03-10T13:01:43.449 INFO:teuthology.run_tasks:Running task internal.archive... 2026-03-10T13:01:43.450 INFO:teuthology.task.internal:Creating archive directory... 
2026-03-10T13:01:43.450 DEBUG:teuthology.orchestra.run.vm00:> install -d -m0755 -- /home/ubuntu/cephtest/archive 2026-03-10T13:01:43.490 DEBUG:teuthology.orchestra.run.vm08:> install -d -m0755 -- /home/ubuntu/cephtest/archive 2026-03-10T13:01:43.509 INFO:teuthology.run_tasks:Running task internal.coredump... 2026-03-10T13:01:43.511 INFO:teuthology.task.internal:Enabling coredump saving... 2026-03-10T13:01:43.511 DEBUG:teuthology.orchestra.run.vm00:> test -f /run/.containerenv -o -f /.dockerenv 2026-03-10T13:01:43.561 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-10T13:01:43.561 DEBUG:teuthology.orchestra.run.vm08:> test -f /run/.containerenv -o -f /.dockerenv 2026-03-10T13:01:43.577 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-10T13:01:43.578 DEBUG:teuthology.orchestra.run.vm00:> install -d -m0755 -- /home/ubuntu/cephtest/archive/coredump && sudo sysctl -w kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core && echo kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core | sudo tee -a /etc/sysctl.conf 2026-03-10T13:01:43.604 DEBUG:teuthology.orchestra.run.vm08:> install -d -m0755 -- /home/ubuntu/cephtest/archive/coredump && sudo sysctl -w kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core && echo kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core | sudo tee -a /etc/sysctl.conf 2026-03-10T13:01:43.635 INFO:teuthology.orchestra.run.vm00.stdout:kernel.core_pattern = /home/ubuntu/cephtest/archive/coredump/%t.%p.core 2026-03-10T13:01:43.649 INFO:teuthology.orchestra.run.vm00.stdout:kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core 2026-03-10T13:01:43.649 INFO:teuthology.orchestra.run.vm08.stdout:kernel.core_pattern = /home/ubuntu/cephtest/archive/coredump/%t.%p.core 2026-03-10T13:01:43.661 INFO:teuthology.orchestra.run.vm08.stdout:kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core 2026-03-10T13:01:43.664 INFO:teuthology.run_tasks:Running task internal.sudo... 2026-03-10T13:01:43.666 INFO:teuthology.task.internal:Configuring sudo... 2026-03-10T13:01:43.666 DEBUG:teuthology.orchestra.run.vm00:> sudo sed -i.orig.teuthology -e 's/^\([^#]*\) \(requiretty\)/\1 !\2/g' -e 's/^\([^#]*\) !\(visiblepw\)/\1 \2/g' /etc/sudoers 2026-03-10T13:01:43.693 DEBUG:teuthology.orchestra.run.vm08:> sudo sed -i.orig.teuthology -e 's/^\([^#]*\) \(requiretty\)/\1 !\2/g' -e 's/^\([^#]*\) !\(visiblepw\)/\1 \2/g' /etc/sudoers 2026-03-10T13:01:43.731 INFO:teuthology.run_tasks:Running task internal.syslog... 2026-03-10T13:01:43.733 INFO:teuthology.task.internal.syslog:Starting syslog monitoring... 
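internal.coredump only touches kernel.core_pattern after confirming the node is not a container (the /run/.containerenv and /.dockerenv probes above), since that sysctl is host-wide. The whole step condensed into one sketch, with the same paths this run used:

# Redirect kernel core dumps into the teuthology archive directory.
ARCHIVE=/home/ubuntu/cephtest/archive/coredump
if test -f /run/.containerenv -o -f /.dockerenv; then
    exit 0    # inside a container: leave the host's core_pattern alone
fi
install -d -m0755 -- "$ARCHIVE"
# %t = dump time (seconds since epoch), %p = PID of the crashing process
sudo sysctl -w kernel.core_pattern="$ARCHIVE/%t.%p.core"
# persist the setting across reboots
echo "kernel.core_pattern=$ARCHIVE/%t.%p.core" | sudo tee -a /etc/sysctl.conf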
2026-03-10T13:01:43.733 DEBUG:teuthology.orchestra.run.vm00:> mkdir -p -m0755 -- /home/ubuntu/cephtest/archive/syslog 2026-03-10T13:01:43.764 DEBUG:teuthology.orchestra.run.vm08:> mkdir -p -m0755 -- /home/ubuntu/cephtest/archive/syslog 2026-03-10T13:01:43.789 DEBUG:teuthology.orchestra.run.vm00:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/kern.log 2026-03-10T13:01:43.846 DEBUG:teuthology.orchestra.run.vm00:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/misc.log 2026-03-10T13:01:43.908 DEBUG:teuthology.orchestra.run.vm00:> set -ex 2026-03-10T13:01:43.908 DEBUG:teuthology.orchestra.run.vm00:> sudo dd of=/etc/rsyslog.d/80-cephtest.conf 2026-03-10T13:01:43.972 DEBUG:teuthology.orchestra.run.vm08:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/kern.log 2026-03-10T13:01:43.999 DEBUG:teuthology.orchestra.run.vm08:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/misc.log 2026-03-10T13:01:44.058 DEBUG:teuthology.orchestra.run.vm08:> set -ex 2026-03-10T13:01:44.058 DEBUG:teuthology.orchestra.run.vm08:> sudo dd of=/etc/rsyslog.d/80-cephtest.conf 2026-03-10T13:01:44.128 DEBUG:teuthology.orchestra.run.vm00:> sudo service rsyslog restart 2026-03-10T13:01:44.130 DEBUG:teuthology.orchestra.run.vm08:> sudo service rsyslog restart 2026-03-10T13:01:44.157 INFO:teuthology.orchestra.run.vm00.stderr:Redirecting to /bin/systemctl restart rsyslog.service 2026-03-10T13:01:44.205 INFO:teuthology.orchestra.run.vm08.stderr:Redirecting to /bin/systemctl restart rsyslog.service 2026-03-10T13:01:44.544 INFO:teuthology.run_tasks:Running task internal.timer... 2026-03-10T13:01:44.545 INFO:teuthology.task.internal:Starting timer... 2026-03-10T13:01:44.545 INFO:teuthology.run_tasks:Running task pcp... 2026-03-10T13:01:44.548 INFO:teuthology.run_tasks:Running task selinux... 2026-03-10T13:01:44.551 INFO:teuthology.task.selinux:Excluding vm00: VMs are not yet supported 2026-03-10T13:01:44.551 INFO:teuthology.task.selinux:Excluding vm08: VMs are not yet supported 2026-03-10T13:01:44.551 DEBUG:teuthology.task.selinux:Getting current SELinux state 2026-03-10T13:01:44.551 DEBUG:teuthology.task.selinux:Existing SELinux modes: {} 2026-03-10T13:01:44.551 INFO:teuthology.task.selinux:Putting SELinux into permissive mode 2026-03-10T13:01:44.551 INFO:teuthology.run_tasks:Running task ansible.cephlab... 
2026-03-10T13:01:44.552 DEBUG:teuthology.task:Applying overrides for task ansible.cephlab: {'branch': 'main', 'skip_tags': 'nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs', 'vars': {'timezone': 'UTC'}} 2026-03-10T13:01:44.553 DEBUG:teuthology.repo_utils:Setting repo remote to https://github.com/ceph/ceph-cm-ansible.git 2026-03-10T13:01:44.554 INFO:teuthology.repo_utils:Fetching github.com_ceph_ceph-cm-ansible_main from origin 2026-03-10T13:01:45.056 DEBUG:teuthology.repo_utils:Resetting repo at /home/teuthos/src/github.com_ceph_ceph-cm-ansible_main to origin/main 2026-03-10T13:01:45.061 INFO:teuthology.task.ansible:Playbook: [{'import_playbook': 'ansible_managed.yml'}, {'import_playbook': 'teuthology.yml'}, {'hosts': 'testnodes', 'tasks': [{'set_fact': {'ran_from_cephlab_playbook': True}}]}, {'import_playbook': 'testnodes.yml'}, {'import_playbook': 'container-host.yml'}, {'import_playbook': 'cobbler.yml'}, {'import_playbook': 'paddles.yml'}, {'import_playbook': 'pulpito.yml'}, {'hosts': 'testnodes', 'become': True, 'tasks': [{'name': 'Touch /ceph-qa-ready', 'file': {'path': '/ceph-qa-ready', 'state': 'touch'}, 'when': 'ran_from_cephlab_playbook|bool'}]}] 2026-03-10T13:01:45.062 DEBUG:teuthology.task.ansible:Running ansible-playbook -v --extra-vars '{"ansible_ssh_user": "ubuntu", "timezone": "UTC"}' -i /tmp/teuth_ansible_inventory46902kez --limit vm00.local,vm08.local /home/teuthos/src/github.com_ceph_ceph-cm-ansible_main/cephlab.yml --skip-tags nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs 2026-03-10T13:03:55.638 DEBUG:teuthology.task.ansible:Reconnecting to [Remote(name='ubuntu@vm00.local'), Remote(name='ubuntu@vm08.local')] 2026-03-10T13:03:55.639 INFO:teuthology.orchestra.remote:Trying to reconnect to host 'ubuntu@vm00.local' 2026-03-10T13:03:55.639 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm00.local', 'username': 'ubuntu', 'timeout': 60} 2026-03-10T13:03:55.709 DEBUG:teuthology.orchestra.run.vm00:> true 2026-03-10T13:03:55.790 INFO:teuthology.orchestra.remote:Successfully reconnected to host 'ubuntu@vm00.local' 2026-03-10T13:03:55.790 INFO:teuthology.orchestra.remote:Trying to reconnect to host 'ubuntu@vm08.local' 2026-03-10T13:03:55.790 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm08.local', 'username': 'ubuntu', 'timeout': 60} 2026-03-10T13:03:55.853 DEBUG:teuthology.orchestra.run.vm08:> true 2026-03-10T13:03:55.942 INFO:teuthology.orchestra.remote:Successfully reconnected to host 'ubuntu@vm08.local' 2026-03-10T13:03:55.942 INFO:teuthology.run_tasks:Running task clock... 2026-03-10T13:03:55.946 INFO:teuthology.task.clock:Syncing clocks and checking initial clock skew... 
2026-03-10T13:03:55.946 INFO:teuthology.orchestra.run:Running command with timeout 360 2026-03-10T13:03:55.946 DEBUG:teuthology.orchestra.run.vm00:> sudo systemctl stop ntp.service || sudo systemctl stop ntpd.service || sudo systemctl stop chronyd.service ; sudo ntpd -gq || sudo chronyc makestep ; sudo systemctl start ntp.service || sudo systemctl start ntpd.service || sudo systemctl start chronyd.service ; PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true 2026-03-10T13:03:55.949 INFO:teuthology.orchestra.run:Running command with timeout 360 2026-03-10T13:03:55.949 DEBUG:teuthology.orchestra.run.vm08:> sudo systemctl stop ntp.service || sudo systemctl stop ntpd.service || sudo systemctl stop chronyd.service ; sudo ntpd -gq || sudo chronyc makestep ; sudo systemctl start ntp.service || sudo systemctl start ntpd.service || sudo systemctl start chronyd.service ; PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true 2026-03-10T13:03:55.989 INFO:teuthology.orchestra.run.vm00.stderr:Failed to stop ntp.service: Unit ntp.service not loaded. 2026-03-10T13:03:56.008 INFO:teuthology.orchestra.run.vm00.stderr:Failed to stop ntpd.service: Unit ntpd.service not loaded. 2026-03-10T13:03:56.027 INFO:teuthology.orchestra.run.vm08.stderr:Failed to stop ntp.service: Unit ntp.service not loaded. 2026-03-10T13:03:56.042 INFO:teuthology.orchestra.run.vm00.stderr:sudo: ntpd: command not found 2026-03-10T13:03:56.048 INFO:teuthology.orchestra.run.vm08.stderr:Failed to stop ntpd.service: Unit ntpd.service not loaded. 2026-03-10T13:03:56.059 INFO:teuthology.orchestra.run.vm00.stdout:506 Cannot talk to daemon 2026-03-10T13:03:56.078 INFO:teuthology.orchestra.run.vm00.stderr:Failed to start ntp.service: Unit ntp.service not found. 2026-03-10T13:03:56.081 INFO:teuthology.orchestra.run.vm08.stderr:sudo: ntpd: command not found 2026-03-10T13:03:56.093 INFO:teuthology.orchestra.run.vm00.stderr:Failed to start ntpd.service: Unit ntpd.service not found. 2026-03-10T13:03:56.095 INFO:teuthology.orchestra.run.vm08.stdout:506 Cannot talk to daemon 2026-03-10T13:03:56.116 INFO:teuthology.orchestra.run.vm08.stderr:Failed to start ntp.service: Unit ntp.service not found. 2026-03-10T13:03:56.133 INFO:teuthology.orchestra.run.vm08.stderr:Failed to start ntpd.service: Unit ntpd.service not found. 2026-03-10T13:03:56.150 INFO:teuthology.orchestra.run.vm00.stderr:bash: line 1: ntpq: command not found 2026-03-10T13:03:56.153 INFO:teuthology.orchestra.run.vm00.stdout:MS Name/IP address Stratum Poll Reach LastRx Last sample 2026-03-10T13:03:56.153 INFO:teuthology.orchestra.run.vm00.stdout:=============================================================================== 2026-03-10T13:03:56.187 INFO:teuthology.orchestra.run.vm08.stderr:bash: line 1: ntpq: command not found 2026-03-10T13:03:56.190 INFO:teuthology.orchestra.run.vm08.stdout:MS Name/IP address Stratum Poll Reach LastRx Last sample 2026-03-10T13:03:56.190 INFO:teuthology.orchestra.run.vm08.stdout:=============================================================================== 2026-03-10T13:03:56.190 INFO:teuthology.run_tasks:Running task cephadm... 
2026-03-10T13:03:56.242 INFO:tasks.cephadm:Config: {'cephadm_branch': 'v17.2.0', 'cephadm_git_url': 'https://github.com/ceph/ceph', 'image': 'quay.io/ceph/ceph:v17.2.0', 'conf': {'global': {'mon election default strategy': 1}, 'mgr': {'debug mgr': 20, 'debug ms': 1, 'mgr/cephadm/use_agent': False}, 'mon': {'debug mon': 20, 'debug ms': 1, 'debug paxos': 20}, 'osd': {'debug ms': 1, 'debug osd': 20, 'osd mclock iops capacity threshold hdd': 49000}}, 'flavor': 'default', 'log-ignorelist': ['\\(MDS_ALL_DOWN\\)', '\\(MDS_UP_LESS_THAN_MAX\\)', 'CEPHADM_STRAY_DAEMON', 'CEPHADM_FAILED_DAEMON', 'CEPHADM_AGENT_DOWN'], 'log-only-match': ['CEPHADM_'], 'sha1': 'e911bdebe5c8faa3800735d1568fcdca65db60df'} 2026-03-10T13:03:56.242 INFO:tasks.cephadm:Cluster image is quay.io/ceph/ceph:v17.2.0 2026-03-10T13:03:56.243 INFO:tasks.cephadm:Cluster fsid is 98a3dada-1c81-11f1-89c9-d57c120f78d5 2026-03-10T13:03:56.243 INFO:tasks.cephadm:Choosing monitor IPs and ports... 2026-03-10T13:03:56.243 INFO:tasks.cephadm:Monitor IPs: {'mon.a': '192.168.123.100', 'mon.c': '[v2:192.168.123.100:3301,v1:192.168.123.100:6790]', 'mon.b': '192.168.123.108'} 2026-03-10T13:03:56.243 INFO:tasks.cephadm:First mon is mon.a on vm00 2026-03-10T13:03:56.243 INFO:tasks.cephadm:First mgr is y 2026-03-10T13:03:56.243 INFO:tasks.cephadm:Normalizing hostnames... 2026-03-10T13:03:56.243 DEBUG:teuthology.orchestra.run.vm00:> sudo hostname $(hostname -s) 2026-03-10T13:03:56.281 DEBUG:teuthology.orchestra.run.vm08:> sudo hostname $(hostname -s) 2026-03-10T13:03:56.316 INFO:tasks.cephadm:Downloading cephadm (repo https://github.com/ceph/ceph ref v17.2.0)... 2026-03-10T13:03:56.316 DEBUG:teuthology.orchestra.run.vm00:> curl --silent https://raw.githubusercontent.com/ceph/ceph/v17.2.0/src/cephadm/cephadm > /home/ubuntu/cephtest/cephadm && ls -l /home/ubuntu/cephtest/cephadm 2026-03-10T13:03:56.585 INFO:teuthology.orchestra.run.vm00.stdout:-rw-r--r--. 1 ubuntu ubuntu 320521 Mar 10 13:03 /home/ubuntu/cephtest/cephadm 2026-03-10T13:03:56.585 DEBUG:teuthology.orchestra.run.vm08:> curl --silent https://raw.githubusercontent.com/ceph/ceph/v17.2.0/src/cephadm/cephadm > /home/ubuntu/cephtest/cephadm && ls -l /home/ubuntu/cephtest/cephadm 2026-03-10T13:03:56.674 INFO:teuthology.orchestra.run.vm08.stdout:-rw-r--r--. 1 ubuntu ubuntu 320521 Mar 10 13:03 /home/ubuntu/cephtest/cephadm 2026-03-10T13:03:56.675 DEBUG:teuthology.orchestra.run.vm00:> test -s /home/ubuntu/cephtest/cephadm && test $(stat -c%s /home/ubuntu/cephtest/cephadm) -gt 1000 && chmod +x /home/ubuntu/cephtest/cephadm 2026-03-10T13:03:56.697 DEBUG:teuthology.orchestra.run.vm08:> test -s /home/ubuntu/cephtest/cephadm && test $(stat -c%s /home/ubuntu/cephtest/cephadm) -gt 1000 && chmod +x /home/ubuntu/cephtest/cephadm 2026-03-10T13:03:56.726 INFO:tasks.cephadm:Pulling image quay.io/ceph/ceph:v17.2.0 on all hosts... 2026-03-10T13:03:56.726 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 pull 2026-03-10T13:03:56.742 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 pull 2026-03-10T13:03:56.926 INFO:teuthology.orchestra.run.vm00.stderr:Pulling container image quay.io/ceph/ceph:v17.2.0... 2026-03-10T13:03:57.011 INFO:teuthology.orchestra.run.vm08.stderr:Pulling container image quay.io/ceph/ceph:v17.2.0... 
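The cephadm task fetches the standalone cephadm script straight from the v17.2.0 tag on GitHub and refuses to chmod it until it is non-empty and larger than 1000 bytes (a cheap guard against an error page landing in the file). The same download-and-verify step as a sketch, with the URL and checks from the log:

# Fetch the single-file cephadm at the pinned ref and sanity-check it.
REF=v17.2.0
DEST=/home/ubuntu/cephtest/cephadm
curl --silent "https://raw.githubusercontent.com/ceph/ceph/${REF}/src/cephadm/cephadm" > "$DEST"
ls -l "$DEST"
# Only mark it executable once the size check passes.
test -s "$DEST" && test "$(stat -c%s "$DEST")" -gt 1000 && chmod +x "$DEST"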
2026-03-10T13:04:20.103 INFO:teuthology.orchestra.run.vm00.stdout:{ 2026-03-10T13:04:20.103 INFO:teuthology.orchestra.run.vm00.stdout: "ceph_version": "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)", 2026-03-10T13:04:20.103 INFO:teuthology.orchestra.run.vm00.stdout: "image_id": "e1d6a67b021eb077ee22bf650f1a9fb1980a2cf5c36bdb9cba9eac6de8f702d9", 2026-03-10T13:04:20.103 INFO:teuthology.orchestra.run.vm00.stdout: "repo_digests": [ 2026-03-10T13:04:20.103 INFO:teuthology.orchestra.run.vm00.stdout: "quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a", 2026-03-10T13:04:20.103 INFO:teuthology.orchestra.run.vm00.stdout: "quay.io/ceph/ceph@sha256:cb4d698cb769b6aba05bf6ef04f41a7fe694160140347576e13bd9348514b667" 2026-03-10T13:04:20.103 INFO:teuthology.orchestra.run.vm00.stdout: ] 2026-03-10T13:04:20.103 INFO:teuthology.orchestra.run.vm00.stdout:} 2026-03-10T13:04:21.142 INFO:teuthology.orchestra.run.vm08.stdout:{ 2026-03-10T13:04:21.142 INFO:teuthology.orchestra.run.vm08.stdout: "ceph_version": "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)", 2026-03-10T13:04:21.142 INFO:teuthology.orchestra.run.vm08.stdout: "image_id": "e1d6a67b021eb077ee22bf650f1a9fb1980a2cf5c36bdb9cba9eac6de8f702d9", 2026-03-10T13:04:21.142 INFO:teuthology.orchestra.run.vm08.stdout: "repo_digests": [ 2026-03-10T13:04:21.142 INFO:teuthology.orchestra.run.vm08.stdout: "quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a", 2026-03-10T13:04:21.142 INFO:teuthology.orchestra.run.vm08.stdout: "quay.io/ceph/ceph@sha256:cb4d698cb769b6aba05bf6ef04f41a7fe694160140347576e13bd9348514b667" 2026-03-10T13:04:21.142 INFO:teuthology.orchestra.run.vm08.stdout: ] 2026-03-10T13:04:21.142 INFO:teuthology.orchestra.run.vm08.stdout:} 2026-03-10T13:04:21.156 DEBUG:teuthology.orchestra.run.vm00:> sudo mkdir -p /etc/ceph 2026-03-10T13:04:21.192 DEBUG:teuthology.orchestra.run.vm08:> sudo mkdir -p /etc/ceph 2026-03-10T13:04:21.236 DEBUG:teuthology.orchestra.run.vm00:> sudo chmod 777 /etc/ceph 2026-03-10T13:04:21.269 DEBUG:teuthology.orchestra.run.vm08:> sudo chmod 777 /etc/ceph 2026-03-10T13:04:21.300 INFO:tasks.cephadm:Writing seed config... 
2026-03-10T13:04:21.300 INFO:tasks.cephadm: override: [global] mon election default strategy = 1
2026-03-10T13:04:21.300 INFO:tasks.cephadm: override: [mgr] debug mgr = 20
2026-03-10T13:04:21.300 INFO:tasks.cephadm: override: [mgr] debug ms = 1
2026-03-10T13:04:21.300 INFO:tasks.cephadm: override: [mgr] mgr/cephadm/use_agent = False
2026-03-10T13:04:21.300 INFO:tasks.cephadm: override: [mon] debug mon = 20
2026-03-10T13:04:21.300 INFO:tasks.cephadm: override: [mon] debug ms = 1
2026-03-10T13:04:21.300 INFO:tasks.cephadm: override: [mon] debug paxos = 20
2026-03-10T13:04:21.301 INFO:tasks.cephadm: override: [osd] debug ms = 1
2026-03-10T13:04:21.301 INFO:tasks.cephadm: override: [osd] debug osd = 20
2026-03-10T13:04:21.301 INFO:tasks.cephadm: override: [osd] osd mclock iops capacity threshold hdd = 49000
2026-03-10T13:04:21.301 DEBUG:teuthology.orchestra.run.vm00:> set -ex
2026-03-10T13:04:21.301 DEBUG:teuthology.orchestra.run.vm00:> dd of=/home/ubuntu/cephtest/seed.ceph.conf
2026-03-10T13:04:21.333 DEBUG:tasks.cephadm:Final config:
[global]
# make logging friendly to teuthology
log_to_file = true
log_to_stderr = false
log to journald = false
mon cluster log to file = true
mon cluster log file level = debug
mon clock drift allowed = 1.000
# replicate across OSDs, not hosts
osd crush chooseleaf type = 0
#osd pool default size = 2
osd pool default erasure code profile = plugin=jerasure technique=reed_sol_van k=2 m=1 crush-failure-domain=osd
# enable some debugging
auth debug = true
ms die on old message = true
ms die on bug = true
debug asserts on shutdown = true
# adjust warnings
mon max pg per osd = 10000  # >= luminous
mon pg warn max object skew = 0
mon osd allow primary affinity = true
mon osd allow pg remap = true
mon warn on legacy crush tunables = false
mon warn on crush straw calc version zero = false
mon warn on no sortbitwise = false
mon warn on osd down out interval zero = false
mon warn on too few osds = false
mon_warn_on_pool_pg_num_not_power_of_two = false
# disable pg_autoscaler by default for new pools
osd_pool_default_pg_autoscale_mode = off
# tests delete pools
mon allow pool delete = true
fsid = 98a3dada-1c81-11f1-89c9-d57c120f78d5
mon election default strategy = 1
[osd]
osd scrub load threshold = 5.0
osd scrub max interval = 600
osd mclock profile = high_recovery_ops
osd recover clone overlap = true
osd recovery max chunk = 1048576
osd deep scrub update digest min age = 30
osd map max advance = 10
osd memory target autotune = true
# debugging
osd debug shutdown = true
osd debug op order = true
osd debug verify stray on activate = true
osd debug pg log writeout = true
osd debug verify cached snaps = true
osd debug verify missing on start = true
osd debug misdirected ops = true
osd op queue = debug_random
osd op queue cut off = debug_random
osd shutdown pgref assert = true
bdev debug aio = true
osd sloppy crc = true
debug ms = 1
debug osd = 20
osd mclock iops capacity threshold hdd = 49000
[mgr]
mon reweight min pgs per osd = 4
mon reweight min bytes per osd = 10
mgr/telemetry/nag = false
debug mgr = 20
debug ms = 1
mgr/cephadm/use_agent = False
[mon]
mon data avail warn = 5
mon mgr mkfs grace = 240
mon reweight min pgs per osd = 4
mon osd reporter subtree level = osd
mon osd prime pg temp = true
mon reweight min bytes per osd = 10
# rotate auth tickets quickly to exercise renewal paths
auth mon ticket ttl = 660  # 11m
auth service ticket ttl = 240  # 4m
# don't complain about global id reclaim
mon_warn_on_insecure_global_id_reclaim = false
mon_warn_on_insecure_global_id_reclaim_allowed = false
debug mon = 20
debug ms = 1
debug paxos = 20
[client.rgw]
rgw cache enabled = true
rgw enable ops log = true
rgw enable usage log = true
2026-03-10T13:04:21.334 DEBUG:teuthology.orchestra.run.vm00:mon.a> sudo journalctl -f -n 0 -u ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@mon.a.service
2026-03-10T13:04:21.377 DEBUG:teuthology.orchestra.run.vm00:mgr.y> sudo journalctl -f -n 0 -u ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@mgr.y.service
2026-03-10T13:04:21.420 INFO:tasks.cephadm:Bootstrapping...
2026-03-10T13:04:21.420 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 -v bootstrap --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 --config /home/ubuntu/cephtest/seed.ceph.conf --output-config /etc/ceph/ceph.conf --output-keyring /etc/ceph/ceph.client.admin.keyring --output-pub-ssh-key /home/ubuntu/cephtest/ceph.pub --mon-id a --mgr-id y --orphan-initial-daemons --skip-monitoring-stack --mon-ip 192.168.123.100 --skip-admin-label && sudo chmod +r /etc/ceph/ceph.client.admin.keyring
2026-03-10T13:04:21.585 INFO:teuthology.orchestra.run.vm00.stderr:--------------------------------------------------------------------------------
2026-03-10T13:04:21.585 INFO:teuthology.orchestra.run.vm00.stderr:cephadm ['--image', 'quay.io/ceph/ceph:v17.2.0', '-v', 'bootstrap', '--fsid', '98a3dada-1c81-11f1-89c9-d57c120f78d5', '--config', '/home/ubuntu/cephtest/seed.ceph.conf', '--output-config', '/etc/ceph/ceph.conf', '--output-keyring', '/etc/ceph/ceph.client.admin.keyring', '--output-pub-ssh-key', '/home/ubuntu/cephtest/ceph.pub', '--mon-id', 'a', '--mgr-id', 'y', '--orphan-initial-daemons', '--skip-monitoring-stack', '--mon-ip', '192.168.123.100', '--skip-admin-label']
2026-03-10T13:04:21.605 INFO:teuthology.orchestra.run.vm00.stderr:/bin/podman: 5.8.0
2026-03-10T13:04:21.609 INFO:teuthology.orchestra.run.vm00.stderr:Verifying podman|docker is present...
2026-03-10T13:04:21.637 INFO:teuthology.orchestra.run.vm00.stderr:/bin/podman: 5.8.0
2026-03-10T13:04:21.639 INFO:teuthology.orchestra.run.vm00.stderr:Verifying lvm2 is present...
2026-03-10T13:04:21.640 INFO:teuthology.orchestra.run.vm00.stderr:Verifying time synchronization is in place...
2026-03-10T13:04:21.649 INFO:teuthology.orchestra.run.vm00.stderr:systemctl: Failed to get unit file state for chrony.service: No such file or directory
2026-03-10T13:04:21.657 INFO:teuthology.orchestra.run.vm00.stderr:systemctl: inactive
2026-03-10T13:04:21.665 INFO:teuthology.orchestra.run.vm00.stderr:systemctl: enabled
2026-03-10T13:04:21.672 INFO:teuthology.orchestra.run.vm00.stderr:systemctl: active
2026-03-10T13:04:21.672 INFO:teuthology.orchestra.run.vm00.stderr:Unit chronyd.service is enabled and running
2026-03-10T13:04:21.672 INFO:teuthology.orchestra.run.vm00.stderr:Repeating the final host check...
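For readability, the bootstrap invocation above split one flag per line; the values are exactly those from the log, and the comments describe what the flag groups do in this job:

# Bootstrap the seed cluster on vm00 (mon.a / mgr.y) from the quincy image.
sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 -v bootstrap \
    --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 \
    --config /home/ubuntu/cephtest/seed.ceph.conf \
    --output-config /etc/ceph/ceph.conf \
    --output-keyring /etc/ceph/ceph.client.admin.keyring \
    --output-pub-ssh-key /home/ubuntu/cephtest/ceph.pub \
    --mon-id a --mgr-id y \
    --orphan-initial-daemons --skip-monitoring-stack \
    --mon-ip 192.168.123.100 --skip-admin-label \
  && sudo chmod +r /etc/ceph/ceph.client.admin.keyring
# --config seeds the "Final config" written just above; --skip-monitoring-stack
# leaves prometheus/grafana/alertmanager deployment to the test itself, and
# --orphan-initial-daemons keeps cephadm from creating service specs for the
# initial mon/mgr. The trailing chmod +r makes the admin keyring readable to
# the test user.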
2026-03-10T13:04:21.691 INFO:teuthology.orchestra.run.vm00.stderr:/bin/podman: 5.8.0 2026-03-10T13:04:21.697 INFO:teuthology.orchestra.run.vm00.stderr:podman (/bin/podman) version 5.8.0 is present 2026-03-10T13:04:21.726 INFO:teuthology.orchestra.run.vm00.stderr:systemctl is present 2026-03-10T13:04:21.726 INFO:teuthology.orchestra.run.vm00.stderr:lvcreate is present 2026-03-10T13:04:21.727 INFO:teuthology.orchestra.run.vm00.stderr:systemctl: Failed to get unit file state for chrony.service: No such file or directory 2026-03-10T13:04:21.727 INFO:teuthology.orchestra.run.vm00.stderr:systemctl: inactive 2026-03-10T13:04:21.727 INFO:teuthology.orchestra.run.vm00.stderr:systemctl: enabled 2026-03-10T13:04:21.727 INFO:teuthology.orchestra.run.vm00.stderr:systemctl: active 2026-03-10T13:04:21.727 INFO:teuthology.orchestra.run.vm00.stderr:Unit chronyd.service is enabled and running 2026-03-10T13:04:21.727 INFO:teuthology.orchestra.run.vm00.stderr:Host looks OK 2026-03-10T13:04:21.727 INFO:teuthology.orchestra.run.vm00.stderr:Cluster fsid: 98a3dada-1c81-11f1-89c9-d57c120f78d5 2026-03-10T13:04:21.727 INFO:teuthology.orchestra.run.vm00.stderr:Acquiring lock 139699523349904 on /run/cephadm/98a3dada-1c81-11f1-89c9-d57c120f78d5.lock 2026-03-10T13:04:21.727 INFO:teuthology.orchestra.run.vm00.stderr:Lock 139699523349904 acquired on /run/cephadm/98a3dada-1c81-11f1-89c9-d57c120f78d5.lock 2026-03-10T13:04:21.727 INFO:teuthology.orchestra.run.vm00.stderr:Verifying IP 192.168.123.100 port 3300 ... 2026-03-10T13:04:21.728 INFO:teuthology.orchestra.run.vm00.stderr:Verifying IP 192.168.123.100 port 6789 ... 2026-03-10T13:04:21.728 INFO:teuthology.orchestra.run.vm00.stderr:Base mon IP is 192.168.123.100, final addrv is [v2:192.168.123.100:3300,v1:192.168.123.100:6789] 2026-03-10T13:04:21.731 INFO:teuthology.orchestra.run.vm00.stderr:/sbin/ip: default via 192.168.123.1 dev eth0 proto dhcp src 192.168.123.100 metric 100 2026-03-10T13:04:21.731 INFO:teuthology.orchestra.run.vm00.stderr:/sbin/ip: 192.168.123.0/24 dev eth0 proto kernel scope link src 192.168.123.100 metric 100 2026-03-10T13:04:21.736 INFO:teuthology.orchestra.run.vm00.stderr:/sbin/ip: ::1 dev lo proto kernel metric 256 pref medium 2026-03-10T13:04:21.736 INFO:teuthology.orchestra.run.vm00.stderr:/sbin/ip: fe80::/64 dev eth0 proto kernel metric 1024 pref medium 2026-03-10T13:04:21.739 INFO:teuthology.orchestra.run.vm00.stderr:/sbin/ip: 1: lo: mtu 65536 state UNKNOWN qlen 1000 2026-03-10T13:04:21.739 INFO:teuthology.orchestra.run.vm00.stderr:/sbin/ip: inet6 ::1/128 scope host 2026-03-10T13:04:21.739 INFO:teuthology.orchestra.run.vm00.stderr:/sbin/ip: valid_lft forever preferred_lft forever 2026-03-10T13:04:21.739 INFO:teuthology.orchestra.run.vm00.stderr:/sbin/ip: 2: eth0: mtu 1500 state UP qlen 1000 2026-03-10T13:04:21.740 INFO:teuthology.orchestra.run.vm00.stderr:/sbin/ip: inet6 fe80::5055:ff:fe00:0/64 scope link noprefixroute 2026-03-10T13:04:21.740 INFO:teuthology.orchestra.run.vm00.stderr:/sbin/ip: valid_lft forever preferred_lft forever 2026-03-10T13:04:21.740 INFO:teuthology.orchestra.run.vm00.stderr:Mon IP `192.168.123.100` is in CIDR network `192.168.123.0/24` 2026-03-10T13:04:21.740 INFO:teuthology.orchestra.run.vm00.stderr:- internal network (--cluster-network) has not been provided, OSD replication will default to the public_network 2026-03-10T13:04:21.741 INFO:teuthology.orchestra.run.vm00.stderr:Pulling container image quay.io/ceph/ceph:v17.2.0... 
2026-03-10T13:04:21.764 INFO:teuthology.orchestra.run.vm00.stderr:/bin/podman: Trying to pull quay.io/ceph/ceph:v17.2.0... 2026-03-10T13:04:23.104 INFO:teuthology.orchestra.run.vm00.stderr:/bin/podman: Getting image source signatures 2026-03-10T13:04:23.104 INFO:teuthology.orchestra.run.vm00.stderr:/bin/podman: Copying blob sha256:33ca8fff7868c4dc0c11e09bca97c720eb9cfbab7221216754367dd8de70388a 2026-03-10T13:04:23.104 INFO:teuthology.orchestra.run.vm00.stderr:/bin/podman: Copying blob sha256:89b4a75bc2d8500f15463747507c9623df43886c134463e7f0527e70900e7a7b 2026-03-10T13:04:23.104 INFO:teuthology.orchestra.run.vm00.stderr:/bin/podman: Copying blob sha256:c32ab78b488d0c72f64eded765c0cf6b5bf2c75dab66cb62a9d367fa6ec42513 2026-03-10T13:04:23.104 INFO:teuthology.orchestra.run.vm00.stderr:/bin/podman: Copying blob sha256:599d07cb321ff0a3c82224e1138fc685793fa69b93ed5780415751a5f7e4b8c2 2026-03-10T13:04:23.104 INFO:teuthology.orchestra.run.vm00.stderr:/bin/podman: Copying blob sha256:a70843738bb77e1ab9c1f85969ebdfa55f178e746be081d1cb4f94011f69eb7c 2026-03-10T13:04:23.104 INFO:teuthology.orchestra.run.vm00.stderr:/bin/podman: Copying config sha256:e1d6a67b021eb077ee22bf650f1a9fb1980a2cf5c36bdb9cba9eac6de8f702d9 2026-03-10T13:04:23.106 INFO:teuthology.orchestra.run.vm00.stderr:/bin/podman: Writing manifest to image destination 2026-03-10T13:04:23.110 INFO:teuthology.orchestra.run.vm00.stderr:/bin/podman: e1d6a67b021eb077ee22bf650f1a9fb1980a2cf5c36bdb9cba9eac6de8f702d9 2026-03-10T13:04:23.255 INFO:teuthology.orchestra.run.vm00.stderr:ceph: ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable) 2026-03-10T13:04:23.314 INFO:teuthology.orchestra.run.vm00.stderr:Ceph version: ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable) 2026-03-10T13:04:23.314 INFO:teuthology.orchestra.run.vm00.stderr:Extracting ceph user uid/gid from container image... 2026-03-10T13:04:23.386 INFO:teuthology.orchestra.run.vm00.stderr:stat: 167 167 2026-03-10T13:04:23.406 INFO:teuthology.orchestra.run.vm00.stderr:Creating initial keys... 2026-03-10T13:04:23.499 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph-authtool: AQDXFrBp9cWsHRAALtWpObXbFgCppMXYu/IfSg== 2026-03-10T13:04:23.630 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph-authtool: AQDXFrBprJJzJRAANt0BZW9Wbt3h2h69qj/N4Q== 2026-03-10T13:04:23.745 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph-authtool: AQDXFrBpBjNLLBAARsrnCHbzMAuuol+05tN3fw== 2026-03-10T13:04:23.763 INFO:teuthology.orchestra.run.vm00.stderr:Creating initial monmap... 
2026-03-10T13:04:24.115 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/monmaptool: /usr/bin/monmaptool: monmap file /tmp/monmap 2026-03-10T13:04:24.116 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/monmaptool: setting min_mon_release = octopus 2026-03-10T13:04:24.116 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/monmaptool: /usr/bin/monmaptool: set fsid to 98a3dada-1c81-11f1-89c9-d57c120f78d5 2026-03-10T13:04:24.116 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/monmaptool: /usr/bin/monmaptool: writing epoch 0 to /tmp/monmap (1 monitors) 2026-03-10T13:04:24.311 INFO:teuthology.orchestra.run.vm00.stderr:monmaptool for a [v2:192.168.123.100:3300,v1:192.168.123.100:6789] on /usr/bin/monmaptool: monmap file /tmp/monmap 2026-03-10T13:04:24.311 INFO:teuthology.orchestra.run.vm00.stderr:setting min_mon_release = octopus 2026-03-10T13:04:24.311 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/monmaptool: set fsid to 98a3dada-1c81-11f1-89c9-d57c120f78d5 2026-03-10T13:04:24.311 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/monmaptool: writing epoch 0 to /tmp/monmap (1 monitors) 2026-03-10T13:04:24.311 INFO:teuthology.orchestra.run.vm00.stderr: 2026-03-10T13:04:24.311 INFO:teuthology.orchestra.run.vm00.stderr:Creating mon... 2026-03-10T13:04:24.617 INFO:teuthology.orchestra.run.vm00.stderr:create mon.a on 2026-03-10T13:04:25.158 INFO:teuthology.orchestra.run.vm00.stderr:systemctl: Created symlink /etc/systemd/system/multi-user.target.wants/ceph.target → /etc/systemd/system/ceph.target. 2026-03-10T13:04:25.518 INFO:teuthology.orchestra.run.vm00.stderr:systemctl: Created symlink /etc/systemd/system/multi-user.target.wants/ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5.target → /etc/systemd/system/ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5.target. 2026-03-10T13:04:25.518 INFO:teuthology.orchestra.run.vm00.stderr:systemctl: Created symlink /etc/systemd/system/ceph.target.wants/ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5.target → /etc/systemd/system/ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5.target. 2026-03-10T13:04:25.877 INFO:teuthology.orchestra.run.vm00.stderr:systemctl: Failed to reset failed state of unit ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@mon.a.service: Unit ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@mon.a.service not loaded. 2026-03-10T13:04:25.894 INFO:teuthology.orchestra.run.vm00.stderr:systemctl: Created symlink /etc/systemd/system/ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5.target.wants/ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@mon.a.service → /etc/systemd/system/ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@.service. 2026-03-10T13:04:26.265 INFO:teuthology.orchestra.run.vm00.stderr:firewalld does not appear to be present 2026-03-10T13:04:26.265 INFO:teuthology.orchestra.run.vm00.stderr:Not possible to enable service . firewalld.service is not available 2026-03-10T13:04:26.265 INFO:teuthology.orchestra.run.vm00.stderr:Waiting for mon to start... 2026-03-10T13:04:26.265 INFO:teuthology.orchestra.run.vm00.stderr:Waiting for mon... 
2026-03-10T13:04:26.477 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: cluster: 2026-03-10T13:04:26.477 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: id: 98a3dada-1c81-11f1-89c9-d57c120f78d5 2026-03-10T13:04:26.477 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: health: HEALTH_OK 2026-03-10T13:04:26.477 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: 2026-03-10T13:04:26.477 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: services: 2026-03-10T13:04:26.477 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: mon: 1 daemons, quorum a (age 0.15451s) 2026-03-10T13:04:26.477 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: mgr: no daemons active 2026-03-10T13:04:26.477 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: osd: 0 osds: 0 up, 0 in 2026-03-10T13:04:26.477 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: 2026-03-10T13:04:26.477 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: data: 2026-03-10T13:04:26.477 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: pools: 0 pools, 0 pgs 2026-03-10T13:04:26.477 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: objects: 0 objects, 0 B 2026-03-10T13:04:26.477 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: usage: 0 B used, 0 B / 0 B avail 2026-03-10T13:04:26.477 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: pgs: 2026-03-10T13:04:26.477 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: 2026-03-10T13:04:26.507 INFO:teuthology.orchestra.run.vm00.stderr:mon is available 2026-03-10T13:04:26.507 INFO:teuthology.orchestra.run.vm00.stderr:Assimilating anything we can from ceph.conf... 2026-03-10T13:04:26.721 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: 2026-03-10T13:04:26.721 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: [global] 2026-03-10T13:04:26.721 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: fsid = 98a3dada-1c81-11f1-89c9-d57c120f78d5 2026-03-10T13:04:26.721 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: mon_host = [v2:192.168.123.100:3300,v1:192.168.123.100:6789] 2026-03-10T13:04:26.721 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: mon_osd_allow_pg_remap = true 2026-03-10T13:04:26.721 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: mon_osd_allow_primary_affinity = true 2026-03-10T13:04:26.721 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: mon_warn_on_no_sortbitwise = false 2026-03-10T13:04:26.721 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: osd_crush_chooseleaf_type = 0 2026-03-10T13:04:26.721 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: 2026-03-10T13:04:26.722 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: [mgr] 2026-03-10T13:04:26.722 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: mgr/cephadm/use_agent = False 2026-03-10T13:04:26.722 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: mgr/telemetry/nag = false 2026-03-10T13:04:26.722 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: 2026-03-10T13:04:26.722 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: [osd] 2026-03-10T13:04:26.722 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: osd_map_max_advance = 10 2026-03-10T13:04:26.722 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: osd_mclock_iops_capacity_threshold_hdd = 49000 2026-03-10T13:04:26.722 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: osd_sloppy_crc = true 2026-03-10T13:04:26.784 INFO:teuthology.orchestra.run.vm00.stderr:Generating new minimal ceph.conf... 
2026-03-10T13:04:26.995 INFO:teuthology.orchestra.run.vm00.stderr:Restarting the monitor... 2026-03-10T13:04:27.425 INFO:teuthology.orchestra.run.vm00.stderr:Setting mon public_network to 192.168.123.0/24 2026-03-10T13:04:27.716 INFO:teuthology.orchestra.run.vm00.stderr:Wrote config to /etc/ceph/ceph.conf 2026-03-10T13:04:27.716 INFO:teuthology.orchestra.run.vm00.stderr:Wrote keyring to /etc/ceph/ceph.client.admin.keyring 2026-03-10T13:04:27.716 INFO:teuthology.orchestra.run.vm00.stderr:Creating mgr... 2026-03-10T13:04:27.716 INFO:teuthology.orchestra.run.vm00.stderr:Verifying port 9283 ... 2026-03-10T13:04:27.890 INFO:teuthology.orchestra.run.vm00.stderr:systemctl: Failed to reset failed state of unit ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@mgr.y.service: Unit ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@mgr.y.service not loaded. 2026-03-10T13:04:27.898 INFO:teuthology.orchestra.run.vm00.stderr:systemctl: Created symlink /etc/systemd/system/ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5.target.wants/ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@mgr.y.service → /etc/systemd/system/ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@.service. 2026-03-10T13:04:28.272 INFO:teuthology.orchestra.run.vm00.stderr:firewalld does not appear to be present 2026-03-10T13:04:28.272 INFO:teuthology.orchestra.run.vm00.stderr:Not possible to enable service . firewalld.service is not available 2026-03-10T13:04:28.272 INFO:teuthology.orchestra.run.vm00.stderr:firewalld does not appear to be present 2026-03-10T13:04:28.272 INFO:teuthology.orchestra.run.vm00.stderr:Not possible to open ports <[9283]>. firewalld.service is not available 2026-03-10T13:04:28.272 INFO:teuthology.orchestra.run.vm00.stderr:Waiting for mgr to start... 2026-03-10T13:04:28.272 INFO:teuthology.orchestra.run.vm00.stderr:Waiting for mgr... 
2026-03-10T13:04:28.551 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: 2026-03-10T13:04:28.551 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: { 2026-03-10T13:04:28.551 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "fsid": "98a3dada-1c81-11f1-89c9-d57c120f78d5", 2026-03-10T13:04:28.551 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "health": { 2026-03-10T13:04:28.551 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "status": "HEALTH_OK", 2026-03-10T13:04:28.551 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "checks": {}, 2026-03-10T13:04:28.551 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "mutes": [] 2026-03-10T13:04:28.551 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: }, 2026-03-10T13:04:28.551 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "election_epoch": 5, 2026-03-10T13:04:28.551 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "quorum": [ 2026-03-10T13:04:28.551 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: 0 2026-03-10T13:04:28.551 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: ], 2026-03-10T13:04:28.551 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "quorum_names": [ 2026-03-10T13:04:28.551 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "a" 2026-03-10T13:04:28.551 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: ], 2026-03-10T13:04:28.551 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "quorum_age": 1, 2026-03-10T13:04:28.551 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "monmap": { 2026-03-10T13:04:28.551 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-10T13:04:28.551 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "min_mon_release_name": "quincy", 2026-03-10T13:04:28.551 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_mons": 1 2026-03-10T13:04:28.551 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: }, 2026-03-10T13:04:28.551 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "osdmap": { 2026-03-10T13:04:28.551 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-10T13:04:28.551 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_osds": 0, 2026-03-10T13:04:28.551 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_up_osds": 0, 2026-03-10T13:04:28.551 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "osd_up_since": 0, 2026-03-10T13:04:28.551 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_in_osds": 0, 2026-03-10T13:04:28.552 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "osd_in_since": 0, 2026-03-10T13:04:28.552 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_remapped_pgs": 0 2026-03-10T13:04:28.552 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: }, 2026-03-10T13:04:28.552 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "pgmap": { 2026-03-10T13:04:28.552 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "pgs_by_state": [], 2026-03-10T13:04:28.552 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_pgs": 0, 2026-03-10T13:04:28.552 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_pools": 0, 2026-03-10T13:04:28.552 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_objects": 0, 2026-03-10T13:04:28.552 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "data_bytes": 0, 2026-03-10T13:04:28.552 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "bytes_used": 0, 2026-03-10T13:04:28.552 
INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "bytes_avail": 0, 2026-03-10T13:04:28.552 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "bytes_total": 0 2026-03-10T13:04:28.552 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: }, 2026-03-10T13:04:28.552 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "fsmap": { 2026-03-10T13:04:28.552 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-10T13:04:28.552 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "by_rank": [], 2026-03-10T13:04:28.552 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "up:standby": 0 2026-03-10T13:04:28.552 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: }, 2026-03-10T13:04:28.552 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "mgrmap": { 2026-03-10T13:04:28.552 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "available": false, 2026-03-10T13:04:28.552 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_standbys": 0, 2026-03-10T13:04:28.552 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "modules": [ 2026-03-10T13:04:28.552 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "iostat", 2026-03-10T13:04:28.552 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "nfs", 2026-03-10T13:04:28.552 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "restful" 2026-03-10T13:04:28.552 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: ], 2026-03-10T13:04:28.552 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "services": {} 2026-03-10T13:04:28.552 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: }, 2026-03-10T13:04:28.552 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "servicemap": { 2026-03-10T13:04:28.552 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-10T13:04:28.552 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "modified": "2026-03-10T13:04:26.320374+0000", 2026-03-10T13:04:28.552 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "services": {} 2026-03-10T13:04:28.552 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: }, 2026-03-10T13:04:28.552 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "progress_events": {} 2026-03-10T13:04:28.552 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: } 2026-03-10T13:04:28.588 INFO:teuthology.orchestra.run.vm00.stderr:mgr not available, waiting (1/15)... 2026-03-10T13:04:28.626 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:28 vm00 ceph-mon[47364]: from='client.? 
192.168.123.100:0/3625287566' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch 2026-03-10T13:04:30.864 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: 2026-03-10T13:04:30.864 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: { 2026-03-10T13:04:30.864 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "fsid": "98a3dada-1c81-11f1-89c9-d57c120f78d5", 2026-03-10T13:04:30.864 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "health": { 2026-03-10T13:04:30.864 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "status": "HEALTH_OK", 2026-03-10T13:04:30.864 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "checks": {}, 2026-03-10T13:04:30.864 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "mutes": [] 2026-03-10T13:04:30.864 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: }, 2026-03-10T13:04:30.864 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "election_epoch": 5, 2026-03-10T13:04:30.864 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "quorum": [ 2026-03-10T13:04:30.864 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: 0 2026-03-10T13:04:30.864 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: ], 2026-03-10T13:04:30.864 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "quorum_names": [ 2026-03-10T13:04:30.864 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "a" 2026-03-10T13:04:30.865 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: ], 2026-03-10T13:04:30.865 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "quorum_age": 3, 2026-03-10T13:04:30.865 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "monmap": { 2026-03-10T13:04:30.865 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-10T13:04:30.865 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "min_mon_release_name": "quincy", 2026-03-10T13:04:30.865 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_mons": 1 2026-03-10T13:04:30.865 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: }, 2026-03-10T13:04:30.865 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "osdmap": { 2026-03-10T13:04:30.865 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-10T13:04:30.865 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_osds": 0, 2026-03-10T13:04:30.865 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_up_osds": 0, 2026-03-10T13:04:30.865 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "osd_up_since": 0, 2026-03-10T13:04:30.865 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_in_osds": 0, 2026-03-10T13:04:30.865 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "osd_in_since": 0, 2026-03-10T13:04:30.865 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_remapped_pgs": 0 2026-03-10T13:04:30.865 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: }, 2026-03-10T13:04:30.865 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "pgmap": { 2026-03-10T13:04:30.865 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "pgs_by_state": [], 2026-03-10T13:04:30.865 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_pgs": 0, 2026-03-10T13:04:30.865 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_pools": 0, 2026-03-10T13:04:30.865 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_objects": 0, 2026-03-10T13:04:30.865 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "data_bytes": 0, 
2026-03-10T13:04:30.865 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "bytes_used": 0, 2026-03-10T13:04:30.865 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "bytes_avail": 0, 2026-03-10T13:04:30.865 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "bytes_total": 0 2026-03-10T13:04:30.865 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: }, 2026-03-10T13:04:30.865 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "fsmap": { 2026-03-10T13:04:30.865 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-10T13:04:30.865 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "by_rank": [], 2026-03-10T13:04:30.865 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "up:standby": 0 2026-03-10T13:04:30.865 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: }, 2026-03-10T13:04:30.865 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "mgrmap": { 2026-03-10T13:04:30.865 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "available": false, 2026-03-10T13:04:30.865 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_standbys": 0, 2026-03-10T13:04:30.865 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "modules": [ 2026-03-10T13:04:30.865 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "iostat", 2026-03-10T13:04:30.865 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "nfs", 2026-03-10T13:04:30.865 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "restful" 2026-03-10T13:04:30.865 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: ], 2026-03-10T13:04:30.865 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "services": {} 2026-03-10T13:04:30.865 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: }, 2026-03-10T13:04:30.865 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "servicemap": { 2026-03-10T13:04:30.865 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-10T13:04:30.865 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "modified": "2026-03-10T13:04:26.320374+0000", 2026-03-10T13:04:30.865 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "services": {} 2026-03-10T13:04:30.865 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: }, 2026-03-10T13:04:30.865 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "progress_events": {} 2026-03-10T13:04:30.865 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: } 2026-03-10T13:04:30.901 INFO:teuthology.orchestra.run.vm00.stderr:mgr not available, waiting (2/15)... 2026-03-10T13:04:31.159 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:30 vm00 ceph-mon[47364]: from='client.? 
192.168.123.100:0/856093584' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch 2026-03-10T13:04:33.116 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: 2026-03-10T13:04:33.117 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: { 2026-03-10T13:04:33.117 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "fsid": "98a3dada-1c81-11f1-89c9-d57c120f78d5", 2026-03-10T13:04:33.117 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "health": { 2026-03-10T13:04:33.117 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "status": "HEALTH_OK", 2026-03-10T13:04:33.117 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "checks": {}, 2026-03-10T13:04:33.117 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "mutes": [] 2026-03-10T13:04:33.117 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: }, 2026-03-10T13:04:33.117 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "election_epoch": 5, 2026-03-10T13:04:33.117 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "quorum": [ 2026-03-10T13:04:33.117 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: 0 2026-03-10T13:04:33.117 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: ], 2026-03-10T13:04:33.117 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "quorum_names": [ 2026-03-10T13:04:33.117 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "a" 2026-03-10T13:04:33.117 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: ], 2026-03-10T13:04:33.117 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "quorum_age": 5, 2026-03-10T13:04:33.117 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "monmap": { 2026-03-10T13:04:33.117 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-10T13:04:33.117 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "min_mon_release_name": "quincy", 2026-03-10T13:04:33.117 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_mons": 1 2026-03-10T13:04:33.117 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: }, 2026-03-10T13:04:33.117 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "osdmap": { 2026-03-10T13:04:33.117 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-10T13:04:33.117 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_osds": 0, 2026-03-10T13:04:33.117 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_up_osds": 0, 2026-03-10T13:04:33.117 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "osd_up_since": 0, 2026-03-10T13:04:33.117 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_in_osds": 0, 2026-03-10T13:04:33.117 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "osd_in_since": 0, 2026-03-10T13:04:33.117 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_remapped_pgs": 0 2026-03-10T13:04:33.117 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: }, 2026-03-10T13:04:33.117 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "pgmap": { 2026-03-10T13:04:33.117 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "pgs_by_state": [], 2026-03-10T13:04:33.117 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_pgs": 0, 2026-03-10T13:04:33.117 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_pools": 0, 2026-03-10T13:04:33.117 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_objects": 0, 2026-03-10T13:04:33.117 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "data_bytes": 0, 
2026-03-10T13:04:33.117 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "bytes_used": 0, 2026-03-10T13:04:33.117 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "bytes_avail": 0, 2026-03-10T13:04:33.117 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "bytes_total": 0 2026-03-10T13:04:33.117 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: }, 2026-03-10T13:04:33.117 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "fsmap": { 2026-03-10T13:04:33.117 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-10T13:04:33.117 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "by_rank": [], 2026-03-10T13:04:33.117 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "up:standby": 0 2026-03-10T13:04:33.118 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: }, 2026-03-10T13:04:33.118 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "mgrmap": { 2026-03-10T13:04:33.118 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "available": false, 2026-03-10T13:04:33.118 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_standbys": 0, 2026-03-10T13:04:33.118 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "modules": [ 2026-03-10T13:04:33.118 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "iostat", 2026-03-10T13:04:33.118 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "nfs", 2026-03-10T13:04:33.118 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "restful" 2026-03-10T13:04:33.118 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: ], 2026-03-10T13:04:33.118 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "services": {} 2026-03-10T13:04:33.118 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: }, 2026-03-10T13:04:33.118 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "servicemap": { 2026-03-10T13:04:33.118 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-10T13:04:33.118 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "modified": "2026-03-10T13:04:26.320374+0000", 2026-03-10T13:04:33.118 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "services": {} 2026-03-10T13:04:33.118 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: }, 2026-03-10T13:04:33.118 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "progress_events": {} 2026-03-10T13:04:33.118 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: } 2026-03-10T13:04:33.157 INFO:teuthology.orchestra.run.vm00.stderr:mgr not available, waiting (3/15)... 
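The three status dumps above are the bootstrap polling the cluster until the mgrmap reports an available active mgr, retrying up to 15 times; the dumps differ only in quorum_age. The same wait can be expressed with jq (used throughout this suite) along these lines:

  # Poll until the active mgr is reported available, giving up after 15 tries
  for i in $(seq 1 15); do
      ceph status --format json | jq -e '.mgrmap.available' >/dev/null && break
      echo "mgr not available, waiting ($i/15)..."
      sleep 2
  done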
2026-03-10T13:04:33.681 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:33 vm00 ceph-mon[47364]: Activating manager daemon y 2026-03-10T13:04:33.681 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:33 vm00 ceph-mon[47364]: mgrmap e2: y(active, starting, since 0.00450241s) 2026-03-10T13:04:33.681 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:33 vm00 ceph-mon[47364]: from='mgr.14100 192.168.123.100:0/2909302338' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-10T13:04:33.681 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:33 vm00 ceph-mon[47364]: from='mgr.14100 192.168.123.100:0/2909302338' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-10T13:04:33.681 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:33 vm00 ceph-mon[47364]: from='mgr.14100 192.168.123.100:0/2909302338' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-10T13:04:33.681 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:33 vm00 ceph-mon[47364]: from='mgr.14100 192.168.123.100:0/2909302338' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-10T13:04:33.681 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:33 vm00 ceph-mon[47364]: from='mgr.14100 192.168.123.100:0/2909302338' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-10T13:04:33.681 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:33 vm00 ceph-mon[47364]: from='mgr.14100 192.168.123.100:0/2909302338' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-10T13:04:33.681 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:33 vm00 ceph-mon[47364]: from='mgr.14100 192.168.123.100:0/2909302338' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T13:04:33.681 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:33 vm00 ceph-mon[47364]: from='mgr.14100 192.168.123.100:0/2909302338' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-10T13:04:33.681 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:33 vm00 ceph-mon[47364]: Manager daemon y is now available 2026-03-10T13:04:33.681 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:33 vm00 ceph-mon[47364]: from='mgr.14100 192.168.123.100:0/2909302338' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T13:04:33.681 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:33 vm00 ceph-mon[47364]: from='mgr.14100 192.168.123.100:0/2909302338' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T13:04:33.681 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:33 vm00 ceph-mon[47364]: from='mgr.14100 192.168.123.100:0/2909302338' entity='mgr.y' 2026-03-10T13:04:33.681 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:33 vm00 ceph-mon[47364]: from='client.? 
192.168.123.100:0/1504669740' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch 2026-03-10T13:04:33.681 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:33 vm00 ceph-mon[47364]: from='mgr.14100 192.168.123.100:0/2909302338' entity='mgr.y' 2026-03-10T13:04:33.681 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:33 vm00 ceph-mon[47364]: from='mgr.14100 192.168.123.100:0/2909302338' entity='mgr.y' 2026-03-10T13:04:35.194 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:34 vm00 ceph-mon[47364]: mgrmap e3: y(active, since 1.00776s) 2026-03-10T13:04:35.467 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: 2026-03-10T13:04:35.467 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: { 2026-03-10T13:04:35.467 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "fsid": "98a3dada-1c81-11f1-89c9-d57c120f78d5", 2026-03-10T13:04:35.467 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "health": { 2026-03-10T13:04:35.467 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "status": "HEALTH_OK", 2026-03-10T13:04:35.467 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "checks": {}, 2026-03-10T13:04:35.468 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "mutes": [] 2026-03-10T13:04:35.468 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: }, 2026-03-10T13:04:35.468 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "election_epoch": 5, 2026-03-10T13:04:35.468 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "quorum": [ 2026-03-10T13:04:35.468 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: 0 2026-03-10T13:04:35.468 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: ], 2026-03-10T13:04:35.468 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "quorum_names": [ 2026-03-10T13:04:35.468 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "a" 2026-03-10T13:04:35.468 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: ], 2026-03-10T13:04:35.468 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "quorum_age": 8, 2026-03-10T13:04:35.468 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "monmap": { 2026-03-10T13:04:35.468 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-10T13:04:35.468 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "min_mon_release_name": "quincy", 2026-03-10T13:04:35.468 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_mons": 1 2026-03-10T13:04:35.468 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: }, 2026-03-10T13:04:35.468 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "osdmap": { 2026-03-10T13:04:35.468 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-10T13:04:35.468 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_osds": 0, 2026-03-10T13:04:35.468 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_up_osds": 0, 2026-03-10T13:04:35.468 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "osd_up_since": 0, 2026-03-10T13:04:35.468 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_in_osds": 0, 2026-03-10T13:04:35.468 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "osd_in_since": 0, 2026-03-10T13:04:35.468 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_remapped_pgs": 0 2026-03-10T13:04:35.468 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: }, 2026-03-10T13:04:35.468 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "pgmap": { 2026-03-10T13:04:35.468 
INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "pgs_by_state": [], 2026-03-10T13:04:35.468 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_pgs": 0, 2026-03-10T13:04:35.468 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_pools": 0, 2026-03-10T13:04:35.468 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_objects": 0, 2026-03-10T13:04:35.468 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "data_bytes": 0, 2026-03-10T13:04:35.468 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "bytes_used": 0, 2026-03-10T13:04:35.468 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "bytes_avail": 0, 2026-03-10T13:04:35.468 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "bytes_total": 0 2026-03-10T13:04:35.468 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: }, 2026-03-10T13:04:35.468 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "fsmap": { 2026-03-10T13:04:35.468 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-10T13:04:35.468 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "by_rank": [], 2026-03-10T13:04:35.468 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "up:standby": 0 2026-03-10T13:04:35.468 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: }, 2026-03-10T13:04:35.468 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "mgrmap": { 2026-03-10T13:04:35.468 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "available": true, 2026-03-10T13:04:35.468 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_standbys": 0, 2026-03-10T13:04:35.468 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "modules": [ 2026-03-10T13:04:35.468 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "iostat", 2026-03-10T13:04:35.468 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "nfs", 2026-03-10T13:04:35.468 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "restful" 2026-03-10T13:04:35.469 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: ], 2026-03-10T13:04:35.469 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "services": {} 2026-03-10T13:04:35.469 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: }, 2026-03-10T13:04:35.469 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "servicemap": { 2026-03-10T13:04:35.469 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-10T13:04:35.469 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "modified": "2026-03-10T13:04:26.320374+0000", 2026-03-10T13:04:35.469 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "services": {} 2026-03-10T13:04:35.469 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: }, 2026-03-10T13:04:35.469 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "progress_events": {} 2026-03-10T13:04:35.469 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: } 2026-03-10T13:04:35.501 INFO:teuthology.orchestra.run.vm00.stderr:mgr is available 2026-03-10T13:04:35.789 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: 2026-03-10T13:04:35.789 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: [global] 2026-03-10T13:04:35.789 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: fsid = 98a3dada-1c81-11f1-89c9-d57c120f78d5 2026-03-10T13:04:35.789 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: mon_osd_allow_pg_remap = true 2026-03-10T13:04:35.789 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: mon_osd_allow_primary_affinity = true 2026-03-10T13:04:35.789 
INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: mon_warn_on_no_sortbitwise = false 2026-03-10T13:04:35.789 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: osd_crush_chooseleaf_type = 0 2026-03-10T13:04:35.789 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: 2026-03-10T13:04:35.790 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: [mgr] 2026-03-10T13:04:35.790 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: mgr/telemetry/nag = false 2026-03-10T13:04:35.790 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: 2026-03-10T13:04:35.790 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: [osd] 2026-03-10T13:04:35.790 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: osd_map_max_advance = 10 2026-03-10T13:04:35.790 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: osd_mclock_iops_capacity_threshold_hdd = 49000 2026-03-10T13:04:35.790 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: osd_sloppy_crc = true 2026-03-10T13:04:35.816 INFO:teuthology.orchestra.run.vm00.stderr:Enabling cephadm module... 2026-03-10T13:04:36.217 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:36 vm00 ceph-mon[47364]: mgrmap e4: y(active, since 2s) 2026-03-10T13:04:36.221 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:36 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/65468723' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch 2026-03-10T13:04:36.221 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:36 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/3093791311' entity='client.admin' cmd=[{"prefix": "config assimilate-conf"}]: dispatch 2026-03-10T13:04:36.221 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:36 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/3093791311' entity='client.admin' cmd='[{"prefix": "config assimilate-conf"}]': finished 2026-03-10T13:04:37.226 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:37 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/4029675891' entity='client.admin' cmd=[{"prefix": "mgr module enable", "module": "cephadm"}]: dispatch 2026-03-10T13:04:37.463 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: { 2026-03-10T13:04:37.463 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "epoch": 5, 2026-03-10T13:04:37.463 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "available": true, 2026-03-10T13:04:37.463 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "active_name": "y", 2026-03-10T13:04:37.463 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_standby": 0 2026-03-10T13:04:37.463 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: } 2026-03-10T13:04:37.499 INFO:teuthology.orchestra.run.vm00.stderr:Waiting for the mgr to restart... 2026-03-10T13:04:37.499 INFO:teuthology.orchestra.run.vm00.stderr:Waiting for mgr epoch 5... 2026-03-10T13:04:38.230 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:38 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/4029675891' entity='client.admin' cmd='[{"prefix": "mgr module enable", "module": "cephadm"}]': finished 2026-03-10T13:04:38.230 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:38 vm00 ceph-mon[47364]: mgrmap e5: y(active, since 4s) 2026-03-10T13:04:38.230 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:38 vm00 ceph-mon[47364]: from='client.? 
192.168.123.100:0/2069851709' entity='client.admin' cmd=[{"prefix": "mgr stat"}]: dispatch 2026-03-10T13:04:42.050 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:41 vm00 ceph-mon[47364]: Active manager daemon y restarted 2026-03-10T13:04:42.050 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:41 vm00 ceph-mon[47364]: Activating manager daemon y 2026-03-10T13:04:42.050 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:41 vm00 ceph-mon[47364]: osdmap e2: 0 total, 0 up, 0 in 2026-03-10T13:04:42.851 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:04:42 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: [10/Mar/2026:13:04:42] ENGINE Bus STARTED 2026-03-10T13:04:42.864 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: { 2026-03-10T13:04:42.864 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "mgrmap_epoch": 7, 2026-03-10T13:04:42.864 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "initialized": true 2026-03-10T13:04:42.864 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: } 2026-03-10T13:04:42.937 INFO:teuthology.orchestra.run.vm00.stderr:mgr epoch 5 is available 2026-03-10T13:04:42.937 INFO:teuthology.orchestra.run.vm00.stderr:Setting orchestrator backend to cephadm... 2026-03-10T13:04:43.197 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:42 vm00 ceph-mon[47364]: mgrmap e6: y(active, starting, since 0.0547492s) 2026-03-10T13:04:43.198 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:42 vm00 ceph-mon[47364]: from='mgr.14120 192.168.123.100:0/3079979174' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T13:04:43.198 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:42 vm00 ceph-mon[47364]: from='mgr.14120 192.168.123.100:0/3079979174' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-10T13:04:43.198 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:42 vm00 ceph-mon[47364]: from='mgr.14120 192.168.123.100:0/3079979174' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-10T13:04:43.198 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:42 vm00 ceph-mon[47364]: from='mgr.14120 192.168.123.100:0/3079979174' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-10T13:04:43.198 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:42 vm00 ceph-mon[47364]: from='mgr.14120 192.168.123.100:0/3079979174' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-10T13:04:43.198 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:42 vm00 ceph-mon[47364]: Manager daemon y is now available 2026-03-10T13:04:43.198 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:42 vm00 ceph-mon[47364]: from='mgr.14120 192.168.123.100:0/3079979174' entity='mgr.y' 2026-03-10T13:04:43.198 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:42 vm00 ceph-mon[47364]: from='mgr.14120 192.168.123.100:0/3079979174' entity='mgr.y' 2026-03-10T13:04:43.198 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:42 vm00 ceph-mon[47364]: from='mgr.14120 192.168.123.100:0/3079979174' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:04:43.198 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:42 vm00 ceph-mon[47364]: from='mgr.14120 192.168.123.100:0/3079979174' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:04:43.198 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:42 vm00 ceph-mon[47364]: from='mgr.14120 192.168.123.100:0/3079979174' entity='mgr.y' cmd=[{"prefix": "config dump", "format": 
"json"}]: dispatch 2026-03-10T13:04:43.198 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:42 vm00 ceph-mon[47364]: from='mgr.14120 192.168.123.100:0/3079979174' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T13:04:43.198 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:42 vm00 ceph-mon[47364]: from='mgr.14120 192.168.123.100:0/3079979174' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T13:04:43.198 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:42 vm00 ceph-mon[47364]: from='mgr.14120 192.168.123.100:0/3079979174' entity='mgr.y' 2026-03-10T13:04:43.198 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:42 vm00 ceph-mon[47364]: from='mgr.14120 192.168.123.100:0/3079979174' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:04:43.629 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: value unchanged 2026-03-10T13:04:43.658 INFO:teuthology.orchestra.run.vm00.stderr:Generating ssh key... 2026-03-10T13:04:43.966 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:43 vm00 ceph-mon[47364]: [10/Mar/2026:13:04:42] ENGINE Bus STARTING 2026-03-10T13:04:43.966 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:43 vm00 ceph-mon[47364]: [10/Mar/2026:13:04:42] ENGINE Serving on https://192.168.123.100:7150 2026-03-10T13:04:43.966 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:43 vm00 ceph-mon[47364]: [10/Mar/2026:13:04:42] ENGINE Bus STARTED 2026-03-10T13:04:43.966 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:43 vm00 ceph-mon[47364]: from='client.14124 -' entity='client.admin' cmd=[{"prefix": "get_command_descriptions"}]: dispatch 2026-03-10T13:04:43.966 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:43 vm00 ceph-mon[47364]: mgrmap e7: y(active, since 1.06614s) 2026-03-10T13:04:43.966 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:43 vm00 ceph-mon[47364]: from='client.14124 -' entity='client.admin' cmd=[{"prefix": "mgr_status"}]: dispatch 2026-03-10T13:04:43.966 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:43 vm00 ceph-mon[47364]: from='mgr.14120 192.168.123.100:0/3079979174' entity='mgr.y' 2026-03-10T13:04:43.966 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:43 vm00 ceph-mon[47364]: from='mgr.14120 192.168.123.100:0/3079979174' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:04:44.254 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:04:43 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: Generating public/private rsa key pair. 2026-03-10T13:04:44.254 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:04:43 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: Your identification has been saved in /tmp/tmpupm4ksm1/key. 2026-03-10T13:04:44.254 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:04:43 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: Your public key has been saved in /tmp/tmpupm4ksm1/key.pub. 
2026-03-10T13:04:44.254 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:04:43 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: The key fingerprint is: 2026-03-10T13:04:44.254 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:04:43 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: SHA256:bjFcIrfhAvkQODBsFk8UFn7+FpX+h5PGwTbtjkXJBaE ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5 2026-03-10T13:04:44.254 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:04:43 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: The key's randomart image is: 2026-03-10T13:04:44.254 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:04:43 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: +---[RSA 3072]----+ 2026-03-10T13:04:44.254 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:04:43 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: |+ooB+ o. | 2026-03-10T13:04:44.254 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:04:43 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: | =B o . . . | 2026-03-10T13:04:44.254 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:04:43 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: |o ++.. +o.E . | 2026-03-10T13:04:44.254 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:04:43 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: | o+ =o=. o o | 2026-03-10T13:04:44.254 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:04:43 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: | .o.S. = = | 2026-03-10T13:04:44.254 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:04:43 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: | .o.o+ B | 2026-03-10T13:04:44.254 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:04:43 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: | oo B + | 2026-03-10T13:04:44.254 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:04:43 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: | .. . * | 2026-03-10T13:04:44.254 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:04:43 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: | . . | 2026-03-10T13:04:44.254 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:04:43 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: +----[SHA256]-----+ 2026-03-10T13:04:44.277 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC95cLy7CSAGimXVjeXCG5+Cgaeeg3AhP15h9DlXvVMddNiJZ4wAiubIS29EY+9CHnXQ+CpJMKzUf6kIuanH1AJRucrUVq77Zs2c4JsRyfZdflKKeCBVrJBI4e1+awH5XylUjhL+fVAOz7DnPo7aJ7+vx30gErpmaGtWOQTmUsw6vE9GP6tDSAQlWpVPqwUxWDWH8z40T2MOUCVQFh8+/ZYjw810ZIdDUcOrJWPkKbKIUoSxIq8c7DDga4LTT0qQr2tp+qBYZHyAlN+/t4cLVU1AieLncMH0qpd6Scdd7yT6au/wD6tJTsOLr5fN3y0in0tzqBU0pKbBTrO6ieLKMNPsvQ/bHlWWyFlg2YALcaTauOBJ8rVVYOm/zkbgwsvJphU01Xuz8VtGl3wtGxqcX1dhTXYBjYlblOphPpNSpzDl+xPC1DL+3vgHfaxjjSAtLmCCg083msl1WRDbGxylsq4VcMEYrh/x2RlhtzD/Umuf+xUUQmAQ8M+AfibnfVJi9s= ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5 2026-03-10T13:04:44.558 INFO:teuthology.orchestra.run.vm00.stderr:Wrote public SSH key to /home/ubuntu/cephtest/ceph.pub 2026-03-10T13:04:44.558 INFO:teuthology.orchestra.run.vm00.stderr:Adding key to root@localhost authorized_keys... 2026-03-10T13:04:44.558 INFO:teuthology.orchestra.run.vm00.stderr:Adding host vm00... 
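Here the mgr has generated the cluster SSH identity (the RSA key whose fingerprint and randomart appear above), the public half has been written to /home/ubuntu/cephtest/ceph.pub and authorized for root, and the bootstrap host is being registered with the orchestrator. Driven by hand, the same sequence would look roughly like:

  # Export the cluster public key and authorize it for root on the host
  ceph cephadm get-pub-key > /home/ubuntu/cephtest/ceph.pub
  ssh-copy-id -f -i /home/ubuntu/cephtest/ceph.pub root@vm00
  # Register the bootstrap host with the cephadm orchestrator
  ceph orch host add vm00 192.168.123.100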
2026-03-10T13:04:45.127 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:44 vm00 ceph-mon[47364]: from='client.14132 -' entity='client.admin' cmd=[{"prefix": "orch set backend", "module_name": "cephadm", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:04:45.127 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:44 vm00 ceph-mon[47364]: from='client.14134 -' entity='client.admin' cmd=[{"prefix": "cephadm set-user", "user": "root", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:04:45.127 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:44 vm00 ceph-mon[47364]: from='client.14136 -' entity='client.admin' cmd=[{"prefix": "cephadm generate-key", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:04:45.127 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:44 vm00 ceph-mon[47364]: Generating ssh key... 2026-03-10T13:04:45.127 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:44 vm00 ceph-mon[47364]: from='mgr.14120 192.168.123.100:0/3079979174' entity='mgr.y' 2026-03-10T13:04:45.128 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:44 vm00 ceph-mon[47364]: from='mgr.14120 192.168.123.100:0/3079979174' entity='mgr.y' 2026-03-10T13:04:45.128 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:44 vm00 ceph-mon[47364]: mgrmap e8: y(active, since 2s) 2026-03-10T13:04:45.519 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: Added host 'vm00' with addr '192.168.123.100' 2026-03-10T13:04:45.587 INFO:teuthology.orchestra.run.vm00.stderr:Deploying unmanaged mon service... 2026-03-10T13:04:46.271 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: Scheduled mon update... 2026-03-10T13:04:46.302 INFO:teuthology.orchestra.run.vm00.stderr:Deploying unmanaged mgr service... 2026-03-10T13:04:46.560 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:46 vm00 ceph-mon[47364]: from='client.14138 -' entity='client.admin' cmd=[{"prefix": "cephadm get-pub-key", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:04:46.560 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:46 vm00 ceph-mon[47364]: from='client.14140 -' entity='client.admin' cmd=[{"prefix": "orch host add", "hostname": "vm00", "addr": "192.168.123.100", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:04:46.560 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:46 vm00 ceph-mon[47364]: Deploying cephadm binary to vm00 2026-03-10T13:04:46.560 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:46 vm00 ceph-mon[47364]: from='mgr.14120 192.168.123.100:0/3079979174' entity='mgr.y' 2026-03-10T13:04:46.560 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:46 vm00 ceph-mon[47364]: Added host vm00 2026-03-10T13:04:46.560 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:46 vm00 ceph-mon[47364]: from='mgr.14120 192.168.123.100:0/3079979174' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:04:46.560 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:46 vm00 ceph-mon[47364]: from='client.14142 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mon", "unmanaged": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:04:46.560 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:46 vm00 ceph-mon[47364]: Saving service mon spec with placement count:5 2026-03-10T13:04:46.560 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: Scheduled mgr update... 2026-03-10T13:04:47.169 INFO:teuthology.orchestra.run.vm00.stderr:Enabling the dashboard module... 
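With the host added, bootstrap stores mon and mgr service specs in unmanaged mode, so the orchestrator records the daemons it just created without scheduling new placements; the "Scheduled mon update" / "Scheduled mgr update" lines are those spec saves. Roughly the same effect by hand:

  # Save mon/mgr specs but leave placement unmanaged during bootstrap
  ceph orch apply mon --unmanaged
  ceph orch apply mgr --unmanaged
  # Confirm the specs were recorded
  ceph orch ls mon
  ceph orch ls mgr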
2026-03-10T13:04:47.404 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:47 vm00 ceph-mon[47364]: from='mgr.14120 192.168.123.100:0/3079979174' entity='mgr.y' 2026-03-10T13:04:47.404 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:47 vm00 ceph-mon[47364]: from='client.14144 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mgr", "unmanaged": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:04:47.404 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:47 vm00 ceph-mon[47364]: Saving service mgr spec with placement count:2 2026-03-10T13:04:47.404 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:47 vm00 ceph-mon[47364]: from='mgr.14120 192.168.123.100:0/3079979174' entity='mgr.y' 2026-03-10T13:04:47.404 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:47 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/3322909269' entity='client.admin' 2026-03-10T13:04:47.404 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:47 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/2687139842' entity='client.admin' 2026-03-10T13:04:48.581 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:48 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/2215162243' entity='client.admin' cmd=[{"prefix": "mgr module enable", "module": "dashboard"}]: dispatch 2026-03-10T13:04:48.581 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:48 vm00 ceph-mon[47364]: from='mgr.14120 192.168.123.100:0/3079979174' entity='mgr.y' 2026-03-10T13:04:48.581 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:48 vm00 ceph-mon[47364]: from='mgr.14120 192.168.123.100:0/3079979174' entity='mgr.y' 2026-03-10T13:04:48.581 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:48 vm00 ceph-mon[47364]: from='mgr.14120 192.168.123.100:0/3079979174' entity='mgr.y' 2026-03-10T13:04:48.581 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:04:48 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: ignoring --setuser ceph since I am not root 2026-03-10T13:04:48.581 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:04:48 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: ignoring --setgroup ceph since I am not root 2026-03-10T13:04:48.581 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:04:48 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: 2026-03-10T13:04:48.515+0000 7f5a6a317000 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member 2026-03-10T13:04:48.581 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:04:48 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: 2026-03-10T13:04:48.577+0000 7f5a6a317000 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member 2026-03-10T13:04:48.726 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: { 2026-03-10T13:04:48.726 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "epoch": 9, 2026-03-10T13:04:48.726 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "available": true, 2026-03-10T13:04:48.726 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "active_name": "y", 2026-03-10T13:04:48.726 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "num_standby": 0 2026-03-10T13:04:48.726 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: } 2026-03-10T13:04:48.763 INFO:teuthology.orchestra.run.vm00.stderr:Waiting for the mgr to restart... 2026-03-10T13:04:48.763 INFO:teuthology.orchestra.run.vm00.stderr:Waiting for mgr epoch 9... 
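Enabling the dashboard module makes the active mgr respawn, so the bootstrap notes the epoch from mgr stat (9 in the output just above) and waits for the mgrmap to reach it again with an active mgr available. A minimal sketch of that wait, assuming jq:

  # Wait for the mgrmap to reach the target epoch with an active mgr available
  until ceph mgr stat -f json | jq -e '.available and .epoch >= 9' >/dev/null; do
      sleep 2
  done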
2026-03-10T13:04:49.253 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:04:49 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: 2026-03-10T13:04:49.055+0000 7f5a6a317000 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-10T13:04:49.597 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:49 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/2215162243' entity='client.admin' cmd='[{"prefix": "mgr module enable", "module": "dashboard"}]': finished 2026-03-10T13:04:49.597 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:49 vm00 ceph-mon[47364]: mgrmap e9: y(active, since 6s) 2026-03-10T13:04:49.597 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:49 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/405074470' entity='client.admin' cmd=[{"prefix": "mgr stat"}]: dispatch 2026-03-10T13:04:49.597 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:04:49 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: 2026-03-10T13:04:49.441+0000 7f5a6a317000 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member 2026-03-10T13:04:49.597 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:04:49 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: 2026-03-10T13:04:49.594+0000 7f5a6a317000 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member 2026-03-10T13:04:50.003 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:04:49 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: 2026-03-10T13:04:49.650+0000 7f5a6a317000 -1 mgr[py] Module influx has missing NOTIFY_TYPES member 2026-03-10T13:04:50.003 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:04:49 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: 2026-03-10T13:04:49.831+0000 7f5a6a317000 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member 2026-03-10T13:04:50.699 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:04:50 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: 2026-03-10T13:04:50.438+0000 7f5a6a317000 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-10T13:04:50.700 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:04:50 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: 2026-03-10T13:04:50.628+0000 7f5a6a317000 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-10T13:04:50.700 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:04:50 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: 2026-03-10T13:04:50.697+0000 7f5a6a317000 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 2026-03-10T13:04:51.003 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:04:50 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: 2026-03-10T13:04:50.758+0000 7f5a6a317000 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-10T13:04:51.003 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:04:50 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: 2026-03-10T13:04:50.823+0000 7f5a6a317000 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 2026-03-10T13:04:51.003 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:04:50 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: 2026-03-10T13:04:50.882+0000 7f5a6a317000 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-10T13:04:51.504 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:04:51 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: 2026-03-10T13:04:51.193+0000 7f5a6a317000 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member 2026-03-10T13:04:51.504 
INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:04:51 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: 2026-03-10T13:04:51.267+0000 7f5a6a317000 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-10T13:04:52.091 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:04:51 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: 2026-03-10T13:04:51.836+0000 7f5a6a317000 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-10T13:04:52.091 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:04:51 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: 2026-03-10T13:04:51.900+0000 7f5a6a317000 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member 2026-03-10T13:04:52.091 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:04:51 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: 2026-03-10T13:04:51.967+0000 7f5a6a317000 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-10T13:04:52.390 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:04:52 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: 2026-03-10T13:04:52.131+0000 7f5a6a317000 -1 mgr[py] Module status has missing NOTIFY_TYPES member 2026-03-10T13:04:52.391 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:04:52 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: 2026-03-10T13:04:52.195+0000 7f5a6a317000 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 2026-03-10T13:04:52.391 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:04:52 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: 2026-03-10T13:04:52.294+0000 7f5a6a317000 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member 2026-03-10T13:04:52.721 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:04:52 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: 2026-03-10T13:04:52.388+0000 7f5a6a317000 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member 2026-03-10T13:04:53.003 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:04:52 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: 2026-03-10T13:04:52.718+0000 7f5a6a317000 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-10T13:04:53.003 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:04:52 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: 2026-03-10T13:04:52.782+0000 7f5a6a317000 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-10T13:04:53.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:53 vm00 ceph-mon[47364]: Active manager daemon y restarted 2026-03-10T13:04:53.754 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:53 vm00 ceph-mon[47364]: Activating manager daemon y 2026-03-10T13:04:53.754 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:53 vm00 ceph-mon[47364]: osdmap e3: 0 total, 0 up, 0 in 2026-03-10T13:04:54.306 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:04:54 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: [10/Mar/2026:13:04:54] ENGINE Bus STARTING 2026-03-10T13:04:54.306 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:04:54 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: [10/Mar/2026:13:04:54] ENGINE Serving on https://192.168.123.100:7150 2026-03-10T13:04:54.306 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:04:54 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: [10/Mar/2026:13:04:54] ENGINE Bus STARTED 2026-03-10T13:04:54.520 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: { 2026-03-10T13:04:54.520 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "mgrmap_epoch": 11, 
2026-03-10T13:04:54.521 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: "initialized": true 2026-03-10T13:04:54.521 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: } 2026-03-10T13:04:54.557 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:54 vm00 ceph-mon[47364]: mgrmap e10: y(active, starting, since 0.714845s) 2026-03-10T13:04:54.557 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:54 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T13:04:54.558 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:54 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-10T13:04:54.558 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:54 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-10T13:04:54.558 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:54 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-10T13:04:54.558 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:54 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-10T13:04:54.558 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:54 vm00 ceph-mon[47364]: Manager daemon y is now available 2026-03-10T13:04:54.558 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:54 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:04:54.558 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:54 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:04:54.558 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:54 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T13:04:54.558 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:54 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T13:04:54.558 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:54 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:04:54.558 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:54 vm00 ceph-mon[47364]: [10/Mar/2026:13:04:54] ENGINE Bus STARTING 2026-03-10T13:04:54.558 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:54 vm00 ceph-mon[47364]: [10/Mar/2026:13:04:54] ENGINE Serving on https://192.168.123.100:7150 2026-03-10T13:04:54.558 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:54 vm00 ceph-mon[47364]: [10/Mar/2026:13:04:54] ENGINE Bus STARTED 2026-03-10T13:04:54.558 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:54 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:04:54.565 INFO:teuthology.orchestra.run.vm00.stderr:mgr epoch 9 is available 2026-03-10T13:04:54.565 INFO:teuthology.orchestra.run.vm00.stderr:Generating a dashboard self-signed certificate... 
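Next the bootstrap provisions the dashboard itself: a self-signed TLS certificate, an initial admin account (its password and URL are printed a little further down), and a lookup of the serving port. Done manually, the equivalent would be approximately:

  # Create a self-signed certificate for the dashboard
  ceph dashboard create-self-signed-cert
  # Create the initial admin account; the password is supplied via a file
  echo 'f83hlqt6o2' > /tmp/dashboard-password   # password as printed later in this log
  ceph dashboard ac-user-create admin -i /tmp/dashboard-password administrator
  # Confirm the endpoint the active mgr is serving the dashboard on
  ceph mgr services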
2026-03-10T13:04:55.106 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: Self-signed certificate created
2026-03-10T13:04:55.148 INFO:teuthology.orchestra.run.vm00.stderr:Creating initial admin user...
2026-03-10T13:04:55.574 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:55 vm00 ceph-mon[47364]: mgrmap e11: y(active, since 1.71742s)
2026-03-10T13:04:55.574 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:55 vm00 ceph-mon[47364]: from='client.14156 -' entity='client.admin' cmd=[{"prefix": "get_command_descriptions"}]: dispatch
2026-03-10T13:04:55.574 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:55 vm00 ceph-mon[47364]: from='client.14156 -' entity='client.admin' cmd=[{"prefix": "mgr_status"}]: dispatch
2026-03-10T13:04:55.574 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:55 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y'
2026-03-10T13:04:55.574 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:55 vm00 ceph-mon[47364]: from='client.14164 -' entity='client.admin' cmd=[{"prefix": "dashboard create-self-signed-cert", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T13:04:55.574 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:55 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y'
2026-03-10T13:04:55.574 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:55 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y'
2026-03-10T13:04:55.574 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: {"username": "admin", "password": "$2b$12$NxYK1LIIzMQd.xcjMFMr9uU5FjAfdbOFlGDtxuqb5zdlV07m7k7tm", "roles": ["administrator"], "name": null, "email": null, "lastUpdate": 1773147895, "enabled": true, "pwdExpirationDate": null, "pwdUpdateRequired": true}
2026-03-10T13:04:55.626 INFO:teuthology.orchestra.run.vm00.stderr:Fetching dashboard port number...
2026-03-10T13:04:55.863 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: 8443
2026-03-10T13:04:55.916 INFO:teuthology.orchestra.run.vm00.stderr:firewalld does not appear to be present
2026-03-10T13:04:55.916 INFO:teuthology.orchestra.run.vm00.stderr:Not possible to open ports <[8443]>. firewalld.service is not available
2026-03-10T13:04:55.917 INFO:teuthology.orchestra.run.vm00.stderr:Ceph Dashboard is now available at:
2026-03-10T13:04:55.917 INFO:teuthology.orchestra.run.vm00.stderr:
2026-03-10T13:04:55.917 INFO:teuthology.orchestra.run.vm00.stderr: URL: https://vm00.local:8443/
2026-03-10T13:04:55.917 INFO:teuthology.orchestra.run.vm00.stderr: User: admin
2026-03-10T13:04:55.917 INFO:teuthology.orchestra.run.vm00.stderr: Password: f83hlqt6o2
2026-03-10T13:04:55.917 INFO:teuthology.orchestra.run.vm00.stderr:
2026-03-10T13:04:55.917 INFO:teuthology.orchestra.run.vm00.stderr:Enabling autotune for osd_memory_target
2026-03-10T13:04:56.516 INFO:teuthology.orchestra.run.vm00.stderr:/usr/bin/ceph: set mgr/dashboard/cluster/status
2026-03-10T13:04:56.569 INFO:teuthology.orchestra.run.vm00.stderr:You can access the Ceph CLI with:
2026-03-10T13:04:56.569 INFO:teuthology.orchestra.run.vm00.stderr:
2026-03-10T13:04:56.569 INFO:teuthology.orchestra.run.vm00.stderr: sudo /home/ubuntu/cephtest/cephadm shell --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring
2026-03-10T13:04:56.569 INFO:teuthology.orchestra.run.vm00.stderr:
2026-03-10T13:04:56.569 INFO:teuthology.orchestra.run.vm00.stderr:Please consider enabling telemetry to help improve Ceph:
2026-03-10T13:04:56.569 INFO:teuthology.orchestra.run.vm00.stderr:
2026-03-10T13:04:56.569 INFO:teuthology.orchestra.run.vm00.stderr: ceph telemetry on
2026-03-10T13:04:56.569 INFO:teuthology.orchestra.run.vm00.stderr:
2026-03-10T13:04:56.569 INFO:teuthology.orchestra.run.vm00.stderr:For more information see:
2026-03-10T13:04:56.569 INFO:teuthology.orchestra.run.vm00.stderr:
2026-03-10T13:04:56.569 INFO:teuthology.orchestra.run.vm00.stderr: https://docs.ceph.com/docs/master/mgr/telemetry/
2026-03-10T13:04:56.569 INFO:teuthology.orchestra.run.vm00.stderr:
2026-03-10T13:04:56.569 INFO:teuthology.orchestra.run.vm00.stderr:Bootstrap complete.
2026-03-10T13:04:56.595 INFO:tasks.cephadm:Fetching config...
2026-03-10T13:04:56.595 DEBUG:teuthology.orchestra.run.vm00:> set -ex
2026-03-10T13:04:56.595 DEBUG:teuthology.orchestra.run.vm00:> dd if=/etc/ceph/ceph.conf of=/dev/stdout
2026-03-10T13:04:56.612 INFO:tasks.cephadm:Fetching client.admin keyring...
2026-03-10T13:04:56.612 DEBUG:teuthology.orchestra.run.vm00:> set -ex
2026-03-10T13:04:56.612 DEBUG:teuthology.orchestra.run.vm00:> dd if=/etc/ceph/ceph.client.admin.keyring of=/dev/stdout
2026-03-10T13:04:56.689 INFO:tasks.cephadm:Fetching mon keyring...
2026-03-10T13:04:56.689 DEBUG:teuthology.orchestra.run.vm00:> set -ex
2026-03-10T13:04:56.689 DEBUG:teuthology.orchestra.run.vm00:> sudo dd if=/var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/mon.a/keyring of=/dev/stdout
2026-03-10T13:04:56.766 INFO:tasks.cephadm:Fetching pub ssh key...
2026-03-10T13:04:56.766 DEBUG:teuthology.orchestra.run.vm00:> set -ex
2026-03-10T13:04:56.766 DEBUG:teuthology.orchestra.run.vm00:> dd if=/home/ubuntu/cephtest/ceph.pub of=/dev/stdout
2026-03-10T13:04:56.824 INFO:tasks.cephadm:Installing pub ssh key for root users...
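The "Fetching config...", "Fetching client.admin keyring...", "Fetching mon keyring..." and "Fetching pub ssh key..." steps above each read a file on the bootstrap node by running `dd if=<path> of=/dev/stdout` and capturing stdout. A minimal self-contained sketch of that pattern, assuming plain `ssh` access and using `subprocess` rather than the teuthology remote API (the helper name and host are illustrative only):

    import subprocess

    def fetch_remote_file(host: str, path: str, sudo: bool = False) -> bytes:
        """Stream a remote file back through dd, as the 'Fetching ...' steps above do."""
        remote_cmd = ("sudo " if sudo else "") + f"dd if={path} of=/dev/stdout"
        return subprocess.run(["ssh", host, remote_cmd],
                              check=True, capture_output=True).stdout

    # e.g. (placeholder host name):
    # conf = fetch_remote_file("vm00.local", "/etc/ceph/ceph.conf")
    # mon_keyring = fetch_remote_file(
    #     "vm00.local",
    #     "/var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/mon.a/keyring",
    #     sudo=True)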
2026-03-10T13:04:56.824 DEBUG:teuthology.orchestra.run.vm00:> sudo install -d -m 0700 /root/.ssh && echo 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC95cLy7CSAGimXVjeXCG5+Cgaeeg3AhP15h9DlXvVMddNiJZ4wAiubIS29EY+9CHnXQ+CpJMKzUf6kIuanH1AJRucrUVq77Zs2c4JsRyfZdflKKeCBVrJBI4e1+awH5XylUjhL+fVAOz7DnPo7aJ7+vx30gErpmaGtWOQTmUsw6vE9GP6tDSAQlWpVPqwUxWDWH8z40T2MOUCVQFh8+/ZYjw810ZIdDUcOrJWPkKbKIUoSxIq8c7DDga4LTT0qQr2tp+qBYZHyAlN+/t4cLVU1AieLncMH0qpd6Scdd7yT6au/wD6tJTsOLr5fN3y0in0tzqBU0pKbBTrO6ieLKMNPsvQ/bHlWWyFlg2YALcaTauOBJ8rVVYOm/zkbgwsvJphU01Xuz8VtGl3wtGxqcX1dhTXYBjYlblOphPpNSpzDl+xPC1DL+3vgHfaxjjSAtLmCCg083msl1WRDbGxylsq4VcMEYrh/x2RlhtzD/Umuf+xUUQmAQ8M+AfibnfVJi9s= ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5' | sudo tee -a /root/.ssh/authorized_keys && sudo chmod 0600 /root/.ssh/authorized_keys 2026-03-10T13:04:56.901 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:56 vm00 ceph-mon[47364]: from='client.14166 -' entity='client.admin' cmd=[{"prefix": "dashboard ac-user-create", "username": "admin", "rolename": "administrator", "force_password": true, "pwd_update_required": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:04:56.901 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:56 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:04:56.901 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:56 vm00 ceph-mon[47364]: mgrmap e12: y(active, since 2s) 2026-03-10T13:04:56.901 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:56 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/1577274612' entity='client.admin' cmd=[{"prefix": "config get", "who": "mgr", "key": "mgr/dashboard/ssl_server_port"}]: dispatch 2026-03-10T13:04:56.901 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:56 vm00 ceph-mon[47364]: from='client.? 
192.168.123.100:0/2392227538' entity='client.admin' 2026-03-10T13:04:56.917 INFO:teuthology.orchestra.run.vm00.stdout:ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC95cLy7CSAGimXVjeXCG5+Cgaeeg3AhP15h9DlXvVMddNiJZ4wAiubIS29EY+9CHnXQ+CpJMKzUf6kIuanH1AJRucrUVq77Zs2c4JsRyfZdflKKeCBVrJBI4e1+awH5XylUjhL+fVAOz7DnPo7aJ7+vx30gErpmaGtWOQTmUsw6vE9GP6tDSAQlWpVPqwUxWDWH8z40T2MOUCVQFh8+/ZYjw810ZIdDUcOrJWPkKbKIUoSxIq8c7DDga4LTT0qQr2tp+qBYZHyAlN+/t4cLVU1AieLncMH0qpd6Scdd7yT6au/wD6tJTsOLr5fN3y0in0tzqBU0pKbBTrO6ieLKMNPsvQ/bHlWWyFlg2YALcaTauOBJ8rVVYOm/zkbgwsvJphU01Xuz8VtGl3wtGxqcX1dhTXYBjYlblOphPpNSpzDl+xPC1DL+3vgHfaxjjSAtLmCCg083msl1WRDbGxylsq4VcMEYrh/x2RlhtzD/Umuf+xUUQmAQ8M+AfibnfVJi9s= ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5 2026-03-10T13:04:56.928 DEBUG:teuthology.orchestra.run.vm08:> sudo install -d -m 0700 /root/.ssh && echo 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC95cLy7CSAGimXVjeXCG5+Cgaeeg3AhP15h9DlXvVMddNiJZ4wAiubIS29EY+9CHnXQ+CpJMKzUf6kIuanH1AJRucrUVq77Zs2c4JsRyfZdflKKeCBVrJBI4e1+awH5XylUjhL+fVAOz7DnPo7aJ7+vx30gErpmaGtWOQTmUsw6vE9GP6tDSAQlWpVPqwUxWDWH8z40T2MOUCVQFh8+/ZYjw810ZIdDUcOrJWPkKbKIUoSxIq8c7DDga4LTT0qQr2tp+qBYZHyAlN+/t4cLVU1AieLncMH0qpd6Scdd7yT6au/wD6tJTsOLr5fN3y0in0tzqBU0pKbBTrO6ieLKMNPsvQ/bHlWWyFlg2YALcaTauOBJ8rVVYOm/zkbgwsvJphU01Xuz8VtGl3wtGxqcX1dhTXYBjYlblOphPpNSpzDl+xPC1DL+3vgHfaxjjSAtLmCCg083msl1WRDbGxylsq4VcMEYrh/x2RlhtzD/Umuf+xUUQmAQ8M+AfibnfVJi9s= ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5' | sudo tee -a /root/.ssh/authorized_keys && sudo chmod 0600 /root/.ssh/authorized_keys 2026-03-10T13:04:56.971 INFO:teuthology.orchestra.run.vm08.stdout:ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC95cLy7CSAGimXVjeXCG5+Cgaeeg3AhP15h9DlXvVMddNiJZ4wAiubIS29EY+9CHnXQ+CpJMKzUf6kIuanH1AJRucrUVq77Zs2c4JsRyfZdflKKeCBVrJBI4e1+awH5XylUjhL+fVAOz7DnPo7aJ7+vx30gErpmaGtWOQTmUsw6vE9GP6tDSAQlWpVPqwUxWDWH8z40T2MOUCVQFh8+/ZYjw810ZIdDUcOrJWPkKbKIUoSxIq8c7DDga4LTT0qQr2tp+qBYZHyAlN+/t4cLVU1AieLncMH0qpd6Scdd7yT6au/wD6tJTsOLr5fN3y0in0tzqBU0pKbBTrO6ieLKMNPsvQ/bHlWWyFlg2YALcaTauOBJ8rVVYOm/zkbgwsvJphU01Xuz8VtGl3wtGxqcX1dhTXYBjYlblOphPpNSpzDl+xPC1DL+3vgHfaxjjSAtLmCCg083msl1WRDbGxylsq4VcMEYrh/x2RlhtzD/Umuf+xUUQmAQ8M+AfibnfVJi9s= ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5 2026-03-10T13:04:56.983 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -- ceph config set mgr mgr/cephadm/allow_ptrace true 2026-03-10T13:04:57.698 INFO:tasks.cephadm:Distributing conf and client.admin keyring to all hosts + 0755 2026-03-10T13:04:57.699 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -- ceph orch client-keyring set client.admin '*' --mode 0755 2026-03-10T13:04:58.245 INFO:tasks.cephadm:Writing (initial) conf and keyring to vm08 2026-03-10T13:04:58.245 DEBUG:teuthology.orchestra.run.vm08:> set -ex 2026-03-10T13:04:58.245 DEBUG:teuthology.orchestra.run.vm08:> dd of=/etc/ceph/ceph.conf 2026-03-10T13:04:58.269 DEBUG:teuthology.orchestra.run.vm08:> set -ex 2026-03-10T13:04:58.269 DEBUG:teuthology.orchestra.run.vm08:> dd of=/etc/ceph/ceph.client.admin.keyring 2026-03-10T13:04:58.331 INFO:tasks.cephadm:Adding host vm08 to orchestrator... 
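The two `authorized_keys` commands just run on vm00 and vm08 are what allow the cephadm mgr module to reach those hosts as root over SSH; only then can the `ceph orch host add vm08` in the next entry succeed. A rough sketch of the same enrolment sequence scripted outside the harness, reusing the exact cephadm shell wrapper shown in the log (the `enroll_host` helper is illustrative, not a cephadm or teuthology API):

    import shlex
    import subprocess

    FSID = "98a3dada-1c81-11f1-89c9-d57c120f78d5"
    CEPHADM_SHELL = (
        "sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell "
        "-c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring "
        f"--fsid {FSID} --"
    )

    def enroll_host(bootstrap_host: str, new_host: str, cluster_pub_key: str) -> None:
        # 1. Trust the cluster's SSH key as root on the new host (mirrors the
        #    install/tee/chmod pipeline in the log).
        install_key = (
            "sudo install -d -m 0700 /root/.ssh && "
            f"echo {shlex.quote(cluster_pub_key)} | sudo tee -a /root/.ssh/authorized_keys && "
            "sudo chmod 0600 /root/.ssh/authorized_keys"
        )
        subprocess.run(["ssh", new_host, install_key], check=True)
        # 2. Register the host with the orchestrator from the bootstrap node.
        subprocess.run(["ssh", bootstrap_host,
                        f"{CEPHADM_SHELL} ceph orch host add {new_host}"], check=True)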
2026-03-10T13:04:58.332 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -- ceph orch host add vm08 2026-03-10T13:04:58.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:58 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:04:58.754 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:58 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:04:58.754 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:58 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:04:58.754 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:58 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:04:58.754 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:58 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:04:58.754 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:58 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/1919702487' entity='client.admin' 2026-03-10T13:04:58.754 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:58 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:04:58.754 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:58 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:04:58.754 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:58 vm00 ceph-mon[47364]: from='client.14176 -' entity='client.admin' cmd=[{"prefix": "orch client-keyring set", "entity": "client.admin", "placement": "*", "mode": "0755", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:04:58.754 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:58 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:04:58.754 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:58 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:04:58.754 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:58 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:04:58.754 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:58 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:04:58.754 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:58 vm00 ceph-mon[47364]: Updating vm00:/etc/ceph/ceph.conf 2026-03-10T13:04:59.732 INFO:teuthology.orchestra.run.vm00.stdout:Added host 'vm08' with addr '192.168.123.108' 2026-03-10T13:04:59.790 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -- ceph orch host ls --format=json 2026-03-10T13:05:00.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:59 vm00 ceph-mon[47364]: Updating 
vm00:/etc/ceph/ceph.client.admin.keyring 2026-03-10T13:05:00.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:59 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:00.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:59 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:00.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:59 vm00 ceph-mon[47364]: from='client.14178 -' entity='client.admin' cmd=[{"prefix": "orch host add", "hostname": "vm08", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:05:00.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:04:59 vm00 ceph-mon[47364]: Deploying cephadm binary to vm08 2026-03-10T13:05:00.585 INFO:teuthology.orchestra.run.vm00.stdout: 2026-03-10T13:05:00.585 INFO:teuthology.orchestra.run.vm00.stdout:[{"addr": "192.168.123.100", "hostname": "vm00", "labels": [], "status": ""}, {"addr": "192.168.123.108", "hostname": "vm08", "labels": [], "status": ""}] 2026-03-10T13:05:00.636 INFO:tasks.cephadm:Setting crush tunables to default 2026-03-10T13:05:00.636 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -- ceph osd crush tunables default 2026-03-10T13:05:00.861 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:00 vm00 ceph-mon[47364]: mgrmap e13: y(active, since 6s) 2026-03-10T13:05:00.862 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:00 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:00.862 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:00 vm00 ceph-mon[47364]: Added host vm08 2026-03-10T13:05:00.862 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:00 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:05:00.862 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:00 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:05:00.862 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:00 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:05:00.862 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:00 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:00.862 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:00 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:01.967 INFO:teuthology.orchestra.run.vm00.stderr:adjusted tunables profile to default 2026-03-10T13:05:02.009 INFO:tasks.cephadm:Adding mon.a on vm00 2026-03-10T13:05:02.009 INFO:tasks.cephadm:Adding mon.c on vm00 2026-03-10T13:05:02.009 INFO:tasks.cephadm:Adding mon.b on vm08 2026-03-10T13:05:02.010 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -- ceph orch apply mon '3;vm00:192.168.123.100=a;vm00:[v2:192.168.123.100:3301,v1:192.168.123.100:6790]=c;vm08:192.168.123.108=b' 2026-03-10T13:05:02.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:01 vm00 ceph-mon[47364]: 
from='client.14180 -' entity='client.admin' cmd=[{"prefix": "orch host ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T13:05:02.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:01 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/1609399031' entity='client.admin' cmd=[{"prefix": "osd crush tunables", "profile": "default"}]: dispatch 2026-03-10T13:05:02.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:01 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:02.907 INFO:teuthology.orchestra.run.vm08.stdout:Scheduled mon update... 2026-03-10T13:05:02.972 DEBUG:teuthology.orchestra.run.vm00:mon.c> sudo journalctl -f -n 0 -u ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@mon.c.service 2026-03-10T13:05:02.973 DEBUG:teuthology.orchestra.run.vm08:mon.b> sudo journalctl -f -n 0 -u ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@mon.b.service 2026-03-10T13:05:02.976 INFO:tasks.cephadm:Waiting for 3 mons in monmap... 2026-03-10T13:05:02.976 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -- ceph mon dump -f json 2026-03-10T13:05:03.207 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:02 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/1609399031' entity='client.admin' cmd='[{"prefix": "osd crush tunables", "profile": "default"}]': finished 2026-03-10T13:05:03.208 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:02 vm00 ceph-mon[47364]: osdmap e4: 0 total, 0 up, 0 in 2026-03-10T13:05:03.208 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:02 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:03.750 INFO:teuthology.orchestra.run.vm08.stdout: 2026-03-10T13:05:03.750 INFO:teuthology.orchestra.run.vm08.stdout:{"epoch":1,"fsid":"98a3dada-1c81-11f1-89c9-d57c120f78d5","modified":"2026-03-10T13:04:24.045109Z","created":"2026-03-10T13:04:24.045109Z","min_mon_release":17,"min_mon_release_name":"quincy","election_strategy":1,"disallowed_leaders: ":"","stretch_mode":false,"tiebreaker_mon":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy"],"optional":[]},"mons":[{"rank":0,"name":"a","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:3300","nonce":0},{"type":"v1","addr":"192.168.123.100:6789","nonce":0}]},"addr":"192.168.123.100:6789/0","public_addr":"192.168.123.100:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-10T13:05:03.759 INFO:teuthology.orchestra.run.vm08.stderr:dumped monmap epoch 1 2026-03-10T13:05:04.301 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[47364]: from='client.14184 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mon", "placement": "3;vm00:192.168.123.100=a;vm00:[v2:192.168.123.100:3301,v1:192.168.123.100:6790]=c;vm08:192.168.123.108=b", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:05:04.302 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[47364]: Saving service mon spec with placement vm00:192.168.123.100=a;vm00:[v2:192.168.123.100:3301,v1:192.168.123.100:6790]=c;vm08:192.168.123.108=b;count:3 2026-03-10T13:05:04.302 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:04.302 
INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:04.302 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm08", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:05:04.302 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:04.302 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-10T13:05:04.302 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:05:04.302 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[47364]: Deploying daemon mon.c on vm00 2026-03-10T13:05:04.302 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[47364]: from='client.? 192.168.123.108:0/609432322' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-10T13:05:04.302 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: mon.c@-1(???) e0 preinit fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 2026-03-10T13:05:04.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: mon.c@-1(synchronizing).mds e1 new map 2026-03-10T13:05:04.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: mon.c@-1(synchronizing).mds e1 print_map 2026-03-10T13:05:04.754 INFO:journalctl@ceph.mon.c.vm00.stdout: e1 2026-03-10T13:05:04.754 INFO:journalctl@ceph.mon.c.vm00.stdout: enable_multiple, ever_enabled_multiple: 1,1 2026-03-10T13:05:04.754 INFO:journalctl@ceph.mon.c.vm00.stdout: default compat: compat={},rocompat={},incompat={1=base v0.20,2=client writeable ranges,3=default file layouts on dirs,4=dir inode in separate object,5=mds uses versioned encoding,6=dirfrag is stored in omap,8=no anchor table,9=file layout v2,10=snaprealm v2} 2026-03-10T13:05:04.754 INFO:journalctl@ceph.mon.c.vm00.stdout: legacy client fscid: -1 2026-03-10T13:05:04.754 INFO:journalctl@ceph.mon.c.vm00.stdout: 2026-03-10T13:05:04.754 INFO:journalctl@ceph.mon.c.vm00.stdout: No filesystems configured 2026-03-10T13:05:04.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: mon.c@-1(synchronizing).osd e0 _set_cache_ratios kv ratio 0.25 inc ratio 0.375 full ratio 0.375 2026-03-10T13:05:04.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: mon.c@-1(synchronizing).osd e0 register_cache_with_pcm pcm target: 2147483648 pcm max: 1020054732 pcm min: 134217728 inc_osd_cache size: 1 2026-03-10T13:05:04.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: mon.c@-1(synchronizing).osd e1 e1: 0 total, 0 up, 0 in 2026-03-10T13:05:04.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: mon.c@-1(synchronizing).osd e2 e2: 0 total, 0 up, 0 in 2026-03-10T13:05:04.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: mon.c@-1(synchronizing).osd e3 e3: 0 total, 0 up, 0 in 2026-03-10T13:05:04.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: 
mon.c@-1(synchronizing).osd e4 e4: 0 total, 0 up, 0 in 2026-03-10T13:05:04.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: mon.c@-1(synchronizing).osd e4 crush map has features 3314932999778484224, adjusting msgr requires 2026-03-10T13:05:04.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: mon.c@-1(synchronizing).osd e4 crush map has features 288514050185494528, adjusting msgr requires 2026-03-10T13:05:04.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: mon.c@-1(synchronizing).osd e4 crush map has features 288514050185494528, adjusting msgr requires 2026-03-10T13:05:04.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: mon.c@-1(synchronizing).osd e4 crush map has features 288514050185494528, adjusting msgr requires 2026-03-10T13:05:04.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: mkfs 98a3dada-1c81-11f1-89c9-d57c120f78d5 2026-03-10T13:05:04.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: mon.a is new leader, mons a in quorum (ranks 0) 2026-03-10T13:05:04.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: mon.a is new leader, mons a in quorum (ranks 0) 2026-03-10T13:05:04.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: monmap e1: 1 mons at {a=[v2:192.168.123.100:3300/0,v1:192.168.123.100:6789/0]} 2026-03-10T13:05:04.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: fsmap 2026-03-10T13:05:04.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: osdmap e1: 0 total, 0 up, 0 in 2026-03-10T13:05:04.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: mgrmap e1: no daemons active 2026-03-10T13:05:04.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/1811580163' entity='client.admin' 2026-03-10T13:05:04.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/3625287566' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch 2026-03-10T13:05:04.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='client.? 
192.168.123.100:0/856093584' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch 2026-03-10T13:05:04.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: Activating manager daemon y 2026-03-10T13:05:04.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: mgrmap e2: y(active, starting, since 0.00450241s) 2026-03-10T13:05:04.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='mgr.14100 192.168.123.100:0/2909302338' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-10T13:05:04.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='mgr.14100 192.168.123.100:0/2909302338' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-10T13:05:04.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='mgr.14100 192.168.123.100:0/2909302338' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-10T13:05:04.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='mgr.14100 192.168.123.100:0/2909302338' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-10T13:05:04.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='mgr.14100 192.168.123.100:0/2909302338' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-10T13:05:04.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='mgr.14100 192.168.123.100:0/2909302338' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-10T13:05:04.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='mgr.14100 192.168.123.100:0/2909302338' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T13:05:04.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='mgr.14100 192.168.123.100:0/2909302338' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-10T13:05:04.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: Manager daemon y is now available 2026-03-10T13:05:04.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='mgr.14100 192.168.123.100:0/2909302338' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T13:05:04.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='mgr.14100 192.168.123.100:0/2909302338' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T13:05:04.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='mgr.14100 192.168.123.100:0/2909302338' entity='mgr.y' 2026-03-10T13:05:04.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='client.? 
192.168.123.100:0/1504669740' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch 2026-03-10T13:05:04.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='mgr.14100 192.168.123.100:0/2909302338' entity='mgr.y' 2026-03-10T13:05:04.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='mgr.14100 192.168.123.100:0/2909302338' entity='mgr.y' 2026-03-10T13:05:04.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: mgrmap e3: y(active, since 1.00776s) 2026-03-10T13:05:04.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: mgrmap e4: y(active, since 2s) 2026-03-10T13:05:04.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/65468723' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch 2026-03-10T13:05:04.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/3093791311' entity='client.admin' cmd=[{"prefix": "config assimilate-conf"}]: dispatch 2026-03-10T13:05:04.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/3093791311' entity='client.admin' cmd='[{"prefix": "config assimilate-conf"}]': finished 2026-03-10T13:05:04.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/4029675891' entity='client.admin' cmd=[{"prefix": "mgr module enable", "module": "cephadm"}]: dispatch 2026-03-10T13:05:04.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/4029675891' entity='client.admin' cmd='[{"prefix": "mgr module enable", "module": "cephadm"}]': finished 2026-03-10T13:05:04.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: mgrmap e5: y(active, since 4s) 2026-03-10T13:05:04.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='client.? 
192.168.123.100:0/2069851709' entity='client.admin' cmd=[{"prefix": "mgr stat"}]: dispatch 2026-03-10T13:05:04.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: Active manager daemon y restarted 2026-03-10T13:05:04.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: Activating manager daemon y 2026-03-10T13:05:04.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: osdmap e2: 0 total, 0 up, 0 in 2026-03-10T13:05:04.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: mgrmap e6: y(active, starting, since 0.0547492s) 2026-03-10T13:05:04.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='mgr.14120 192.168.123.100:0/3079979174' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T13:05:04.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='mgr.14120 192.168.123.100:0/3079979174' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-10T13:05:04.756 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='mgr.14120 192.168.123.100:0/3079979174' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-10T13:05:04.756 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='mgr.14120 192.168.123.100:0/3079979174' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-10T13:05:04.756 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='mgr.14120 192.168.123.100:0/3079979174' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-10T13:05:04.756 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: Manager daemon y is now available 2026-03-10T13:05:04.756 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='mgr.14120 192.168.123.100:0/3079979174' entity='mgr.y' 2026-03-10T13:05:04.756 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='mgr.14120 192.168.123.100:0/3079979174' entity='mgr.y' 2026-03-10T13:05:04.756 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='mgr.14120 192.168.123.100:0/3079979174' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:05:04.756 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='mgr.14120 192.168.123.100:0/3079979174' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:05:04.756 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='mgr.14120 192.168.123.100:0/3079979174' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:05:04.756 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='mgr.14120 192.168.123.100:0/3079979174' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T13:05:04.756 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='mgr.14120 192.168.123.100:0/3079979174' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T13:05:04.756 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='mgr.14120 192.168.123.100:0/3079979174' entity='mgr.y' 2026-03-10T13:05:04.756 
INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='mgr.14120 192.168.123.100:0/3079979174' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:05:04.756 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: [10/Mar/2026:13:04:42] ENGINE Bus STARTING 2026-03-10T13:05:04.756 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: [10/Mar/2026:13:04:42] ENGINE Serving on https://192.168.123.100:7150 2026-03-10T13:05:04.756 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: [10/Mar/2026:13:04:42] ENGINE Bus STARTED 2026-03-10T13:05:04.756 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='client.14124 -' entity='client.admin' cmd=[{"prefix": "get_command_descriptions"}]: dispatch 2026-03-10T13:05:04.756 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: mgrmap e7: y(active, since 1.06614s) 2026-03-10T13:05:04.756 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='client.14124 -' entity='client.admin' cmd=[{"prefix": "mgr_status"}]: dispatch 2026-03-10T13:05:04.756 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='mgr.14120 192.168.123.100:0/3079979174' entity='mgr.y' 2026-03-10T13:05:04.756 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='mgr.14120 192.168.123.100:0/3079979174' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:05:04.756 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='client.14132 -' entity='client.admin' cmd=[{"prefix": "orch set backend", "module_name": "cephadm", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:05:04.756 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='client.14134 -' entity='client.admin' cmd=[{"prefix": "cephadm set-user", "user": "root", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:05:04.756 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='client.14136 -' entity='client.admin' cmd=[{"prefix": "cephadm generate-key", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:05:04.756 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: Generating ssh key... 
2026-03-10T13:05:04.756 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='mgr.14120 192.168.123.100:0/3079979174' entity='mgr.y' 2026-03-10T13:05:04.756 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='mgr.14120 192.168.123.100:0/3079979174' entity='mgr.y' 2026-03-10T13:05:04.756 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: mgrmap e8: y(active, since 2s) 2026-03-10T13:05:04.756 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='client.14138 -' entity='client.admin' cmd=[{"prefix": "cephadm get-pub-key", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:05:04.756 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='client.14140 -' entity='client.admin' cmd=[{"prefix": "orch host add", "hostname": "vm00", "addr": "192.168.123.100", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:05:04.756 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: Deploying cephadm binary to vm00 2026-03-10T13:05:04.756 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='mgr.14120 192.168.123.100:0/3079979174' entity='mgr.y' 2026-03-10T13:05:04.756 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: Added host vm00 2026-03-10T13:05:04.756 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='mgr.14120 192.168.123.100:0/3079979174' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:05:04.756 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='client.14142 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mon", "unmanaged": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:05:04.756 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: Saving service mon spec with placement count:5 2026-03-10T13:05:04.756 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='mgr.14120 192.168.123.100:0/3079979174' entity='mgr.y' 2026-03-10T13:05:04.756 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='client.14144 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mgr", "unmanaged": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:05:04.756 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: Saving service mgr spec with placement count:2 2026-03-10T13:05:04.756 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='mgr.14120 192.168.123.100:0/3079979174' entity='mgr.y' 2026-03-10T13:05:04.756 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/3322909269' entity='client.admin' 2026-03-10T13:05:04.756 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/2687139842' entity='client.admin' 2026-03-10T13:05:04.756 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='client.? 
192.168.123.100:0/2215162243' entity='client.admin' cmd=[{"prefix": "mgr module enable", "module": "dashboard"}]: dispatch 2026-03-10T13:05:04.756 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='mgr.14120 192.168.123.100:0/3079979174' entity='mgr.y' 2026-03-10T13:05:04.756 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='mgr.14120 192.168.123.100:0/3079979174' entity='mgr.y' 2026-03-10T13:05:04.756 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='mgr.14120 192.168.123.100:0/3079979174' entity='mgr.y' 2026-03-10T13:05:04.756 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/2215162243' entity='client.admin' cmd='[{"prefix": "mgr module enable", "module": "dashboard"}]': finished 2026-03-10T13:05:04.756 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: mgrmap e9: y(active, since 6s) 2026-03-10T13:05:04.756 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/405074470' entity='client.admin' cmd=[{"prefix": "mgr stat"}]: dispatch 2026-03-10T13:05:04.756 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: Active manager daemon y restarted 2026-03-10T13:05:04.756 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: Activating manager daemon y 2026-03-10T13:05:04.756 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: osdmap e3: 0 total, 0 up, 0 in 2026-03-10T13:05:04.756 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: mgrmap e10: y(active, starting, since 0.714845s) 2026-03-10T13:05:04.756 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T13:05:04.756 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-10T13:05:04.756 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-10T13:05:04.756 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-10T13:05:04.756 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-10T13:05:04.756 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: Manager daemon y is now available 2026-03-10T13:05:04.757 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:05:04.757 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:05:04.757 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 
cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T13:05:04.757 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T13:05:04.757 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:04.757 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: [10/Mar/2026:13:04:54] ENGINE Bus STARTING 2026-03-10T13:05:04.757 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: [10/Mar/2026:13:04:54] ENGINE Serving on https://192.168.123.100:7150 2026-03-10T13:05:04.757 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: [10/Mar/2026:13:04:54] ENGINE Bus STARTED 2026-03-10T13:05:04.757 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:04.757 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: mgrmap e11: y(active, since 1.71742s) 2026-03-10T13:05:04.757 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='client.14156 -' entity='client.admin' cmd=[{"prefix": "get_command_descriptions"}]: dispatch 2026-03-10T13:05:04.757 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='client.14156 -' entity='client.admin' cmd=[{"prefix": "mgr_status"}]: dispatch 2026-03-10T13:05:04.757 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:04.757 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='client.14164 -' entity='client.admin' cmd=[{"prefix": "dashboard create-self-signed-cert", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:05:04.757 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:04.757 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:04.757 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='client.14166 -' entity='client.admin' cmd=[{"prefix": "dashboard ac-user-create", "username": "admin", "rolename": "administrator", "force_password": true, "pwd_update_required": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:05:04.757 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:04.757 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: mgrmap e12: y(active, since 2s) 2026-03-10T13:05:04.757 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/1577274612' entity='client.admin' cmd=[{"prefix": "config get", "who": "mgr", "key": "mgr/dashboard/ssl_server_port"}]: dispatch 2026-03-10T13:05:04.757 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='client.? 
192.168.123.100:0/2392227538' entity='client.admin' 2026-03-10T13:05:04.757 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:04.757 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:05:04.757 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:04.757 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:05:04.757 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:04.757 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/1919702487' entity='client.admin' 2026-03-10T13:05:04.757 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:05:04.757 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:04.757 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='client.14176 -' entity='client.admin' cmd=[{"prefix": "orch client-keyring set", "entity": "client.admin", "placement": "*", "mode": "0755", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:05:04.757 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:04.757 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:05:04.757 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:05:04.757 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:05:04.757 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: Updating vm00:/etc/ceph/ceph.conf 2026-03-10T13:05:04.757 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: Updating vm00:/etc/ceph/ceph.client.admin.keyring 2026-03-10T13:05:04.757 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:04.757 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:04.757 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='client.14178 -' entity='client.admin' cmd=[{"prefix": "orch host add", "hostname": "vm08", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:05:04.757 
INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: Deploying cephadm binary to vm08 2026-03-10T13:05:04.757 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: mgrmap e13: y(active, since 6s) 2026-03-10T13:05:04.757 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:04.757 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: Added host vm08 2026-03-10T13:05:04.757 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:05:04.757 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:05:04.757 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:05:04.757 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:04.757 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:04.757 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='client.14180 -' entity='client.admin' cmd=[{"prefix": "orch host ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T13:05:04.757 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/1609399031' entity='client.admin' cmd=[{"prefix": "osd crush tunables", "profile": "default"}]: dispatch 2026-03-10T13:05:04.757 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:04.757 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='client.? 
192.168.123.100:0/1609399031' entity='client.admin' cmd='[{"prefix": "osd crush tunables", "profile": "default"}]': finished 2026-03-10T13:05:04.757 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: osdmap e4: 0 total, 0 up, 0 in 2026-03-10T13:05:04.757 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:04.757 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='client.14184 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mon", "placement": "3;vm00:192.168.123.100=a;vm00:[v2:192.168.123.100:3301,v1:192.168.123.100:6790]=c;vm08:192.168.123.108=b", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:05:04.757 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: Saving service mon spec with placement vm00:192.168.123.100=a;vm00:[v2:192.168.123.100:3301,v1:192.168.123.100:6790]=c;vm08:192.168.123.108=b;count:3 2026-03-10T13:05:04.757 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:04.757 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:04.757 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm08", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:05:04.757 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:04.757 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-10T13:05:04.757 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:05:04.757 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: Deploying daemon mon.c on vm00 2026-03-10T13:05:04.757 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: from='client.? 
192.168.123.108:0/609432322' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-10T13:05:04.758 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: mon.c@-1(synchronizing).paxosservice(auth 1..3) refresh upgraded, format 0 -> 3 2026-03-10T13:05:04.758 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: expand_channel_meta expand map: {default=false} 2026-03-10T13:05:04.758 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: expand_channel_meta from 'false' to 'false' 2026-03-10T13:05:04.758 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: expand_channel_meta expanded map: {default=false} 2026-03-10T13:05:04.758 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: expand_channel_meta expand map: {default=info} 2026-03-10T13:05:04.758 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: expand_channel_meta from 'info' to 'info' 2026-03-10T13:05:04.758 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: expand_channel_meta expanded map: {default=info} 2026-03-10T13:05:04.758 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: expand_channel_meta expand map: {default=daemon} 2026-03-10T13:05:04.758 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: expand_channel_meta from 'daemon' to 'daemon' 2026-03-10T13:05:04.758 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: expand_channel_meta expanded map: {default=daemon} 2026-03-10T13:05:04.758 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: expand_channel_meta expand map: {default=debug} 2026-03-10T13:05:04.758 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: expand_channel_meta from 'debug' to 'debug' 2026-03-10T13:05:04.758 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: expand_channel_meta expanded map: {default=debug} 2026-03-10T13:05:04.758 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:04 vm00 ceph-mon[51670]: mon.c@-1(synchronizing) e1 handle_conf_change mon_allow_pool_delete,mon_cluster_log_to_file 2026-03-10T13:05:04.916 INFO:tasks.cephadm:Waiting for 3 mons in monmap... 
2026-03-10T13:05:04.917 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -- ceph mon dump -f json 2026-03-10T13:05:09.657 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:09 vm00 ceph-mon[47364]: Deploying daemon mon.b on vm08 2026-03-10T13:05:09.657 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:09 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T13:05:09.657 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:09 vm00 ceph-mon[47364]: mon.a calling monitor election 2026-03-10T13:05:09.657 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:09 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T13:05:09.657 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:09 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T13:05:09.657 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:09 vm00 ceph-mon[47364]: mon.c calling monitor election 2026-03-10T13:05:09.657 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:09 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T13:05:09.657 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:09 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T13:05:09.657 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:09 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T13:05:09.657 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:09 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T13:05:09.657 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:09 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T13:05:09.657 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:09 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T13:05:09.657 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:09 vm00 ceph-mon[47364]: mon.a is new leader, mons a,c in quorum (ranks 0,1) 2026-03-10T13:05:09.657 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:09 vm00 ceph-mon[47364]: monmap e2: 2 mons at {a=[v2:192.168.123.100:3300/0,v1:192.168.123.100:6789/0],c=[v2:192.168.123.100:3301/0,v1:192.168.123.100:6790/0]} 2026-03-10T13:05:09.657 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:09 vm00 ceph-mon[47364]: fsmap 2026-03-10T13:05:09.657 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:09 vm00 ceph-mon[47364]: osdmap e4: 0 total, 0 up, 0 in 2026-03-10T13:05:09.657 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:09 vm00 ceph-mon[47364]: mgrmap e13: y(active, since 16s) 2026-03-10T13:05:09.657 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:09 vm00 ceph-mon[47364]: overall HEALTH_OK 2026-03-10T13:05:09.657 
INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:09 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:09.657 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:09 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:05:09.657 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:09 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:05:09.657 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:09 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:05:09.658 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:09 vm00 ceph-mon[51670]: Deploying daemon mon.b on vm08 2026-03-10T13:05:09.658 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:09 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T13:05:09.658 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:09 vm00 ceph-mon[51670]: mon.a calling monitor election 2026-03-10T13:05:09.658 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:09 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T13:05:09.658 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:09 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T13:05:09.658 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:09 vm00 ceph-mon[51670]: mon.c calling monitor election 2026-03-10T13:05:09.658 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:09 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T13:05:09.658 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:09 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T13:05:09.658 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:09 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T13:05:09.658 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:09 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T13:05:09.658 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:09 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T13:05:09.658 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:09 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T13:05:09.658 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:09 vm00 ceph-mon[51670]: mon.a is new leader, mons a,c in quorum (ranks 0,1) 2026-03-10T13:05:09.658 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:09 vm00 ceph-mon[51670]: monmap e2: 2 mons at {a=[v2:192.168.123.100:3300/0,v1:192.168.123.100:6789/0],c=[v2:192.168.123.100:3301/0,v1:192.168.123.100:6790/0]} 
2026-03-10T13:05:09.658 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:09 vm00 ceph-mon[51670]: fsmap 2026-03-10T13:05:09.658 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:09 vm00 ceph-mon[51670]: osdmap e4: 0 total, 0 up, 0 in 2026-03-10T13:05:09.658 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:09 vm00 ceph-mon[51670]: mgrmap e13: y(active, since 16s) 2026-03-10T13:05:09.658 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:09 vm00 ceph-mon[51670]: overall HEALTH_OK 2026-03-10T13:05:09.658 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:09 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:09.658 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:09 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:05:09.658 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:09 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:05:09.658 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:09 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:05:09.829 INFO:teuthology.orchestra.run.vm08.stdout: 2026-03-10T13:05:09.829 INFO:teuthology.orchestra.run.vm08.stdout:{"epoch":2,"fsid":"98a3dada-1c81-11f1-89c9-d57c120f78d5","modified":"2026-03-10T13:05:04.309858Z","created":"2026-03-10T13:04:24.045109Z","min_mon_release":17,"min_mon_release_name":"quincy","election_strategy":1,"disallowed_leaders: ":"","stretch_mode":false,"tiebreaker_mon":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy"],"optional":[]},"mons":[{"rank":0,"name":"a","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:3300","nonce":0},{"type":"v1","addr":"192.168.123.100:6789","nonce":0}]},"addr":"192.168.123.100:6789/0","public_addr":"192.168.123.100:6789/0","priority":0,"weight":0,"crush_location":"{}"},{"rank":1,"name":"c","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:3301","nonce":0},{"type":"v1","addr":"192.168.123.100:6790","nonce":0}]},"addr":"192.168.123.100:6790/0","public_addr":"192.168.123.100:6790/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0,1]} 2026-03-10T13:05:09.837 INFO:teuthology.orchestra.run.vm08.stderr:dumped monmap epoch 2 2026-03-10T13:05:10.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:10 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T13:05:10.754 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:10 vm00 ceph-mon[47364]: from='client.? 
192.168.123.108:0/328102217' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-10T13:05:10.754 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:10 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:10.754 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:10 vm00 ceph-mon[47364]: Updating vm00:/etc/ceph/ceph.conf 2026-03-10T13:05:10.754 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:10 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:10.754 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:10 vm00 ceph-mon[47364]: Updating vm08:/etc/ceph/ceph.conf 2026-03-10T13:05:10.754 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:10 vm00 ceph-mon[47364]: Updating vm08:/etc/ceph/ceph.client.admin.keyring 2026-03-10T13:05:10.754 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:10 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T13:05:10.754 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:10 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:10.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:10 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T13:05:10.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:10 vm00 ceph-mon[51670]: from='client.? 192.168.123.108:0/328102217' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-10T13:05:10.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:10 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:10.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:10 vm00 ceph-mon[51670]: Updating vm00:/etc/ceph/ceph.conf 2026-03-10T13:05:10.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:10 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:10.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:10 vm00 ceph-mon[51670]: Updating vm08:/etc/ceph/ceph.conf 2026-03-10T13:05:10.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:10 vm00 ceph-mon[51670]: Updating vm08:/etc/ceph/ceph.client.admin.keyring 2026-03-10T13:05:10.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:10 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T13:05:10.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:10 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:10.931 INFO:tasks.cephadm:Waiting for 3 mons in monmap... 
2026-03-10T13:05:10.931 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -- ceph mon dump -f json 2026-03-10T13:05:15.807 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:15 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T13:05:15.807 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:15 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T13:05:15.807 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:15 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T13:05:15.807 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:15 vm00 ceph-mon[47364]: mon.a calling monitor election 2026-03-10T13:05:15.807 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:15 vm00 ceph-mon[47364]: mon.c calling monitor election 2026-03-10T13:05:15.807 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:15 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T13:05:15.807 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:15 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T13:05:15.807 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:15 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T13:05:15.807 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:15 vm00 ceph-mon[47364]: pgmap v4: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T13:05:15.807 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:15 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T13:05:15.807 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:15 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T13:05:15.807 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:15 vm00 ceph-mon[47364]: mon.a is new leader, mons a,c in quorum (ranks 0,1) 2026-03-10T13:05:15.807 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:15 vm00 ceph-mon[47364]: monmap e3: 3 mons at {a=[v2:192.168.123.100:3300/0,v1:192.168.123.100:6789/0],b=[v2:192.168.123.108:3300/0,v1:192.168.123.108:6789/0],c=[v2:192.168.123.100:3301/0,v1:192.168.123.100:6790/0]} 2026-03-10T13:05:15.807 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:15 vm00 ceph-mon[47364]: fsmap 2026-03-10T13:05:15.807 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:15 vm00 ceph-mon[47364]: osdmap e4: 0 total, 0 up, 0 in 2026-03-10T13:05:15.807 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:15 vm00 ceph-mon[47364]: mgrmap e13: y(active, since 22s) 2026-03-10T13:05:15.807 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:15 vm00 ceph-mon[47364]: overall HEALTH_OK 2026-03-10T13:05:15.807 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:15 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:15.807 
INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:15 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:15.807 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:15 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:15.807 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:15 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-10T13:05:15.807 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:15 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-10T13:05:15.807 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:15 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:05:15.807 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:15 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T13:05:15.807 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:15 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T13:05:15.807 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:15 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T13:05:15.807 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:15 vm00 ceph-mon[51670]: mon.a calling monitor election 2026-03-10T13:05:15.807 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:15 vm00 ceph-mon[51670]: mon.c calling monitor election 2026-03-10T13:05:15.807 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:15 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T13:05:15.808 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:15 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T13:05:15.808 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:15 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T13:05:15.808 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:15 vm00 ceph-mon[51670]: pgmap v4: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T13:05:15.808 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:15 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T13:05:15.808 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:15 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T13:05:15.808 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:15 vm00 ceph-mon[51670]: mon.a is new leader, mons a,c in quorum (ranks 0,1) 2026-03-10T13:05:15.808 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:15 vm00 ceph-mon[51670]: monmap e3: 3 mons at 
{a=[v2:192.168.123.100:3300/0,v1:192.168.123.100:6789/0],b=[v2:192.168.123.108:3300/0,v1:192.168.123.108:6789/0],c=[v2:192.168.123.100:3301/0,v1:192.168.123.100:6790/0]} 2026-03-10T13:05:15.808 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:15 vm00 ceph-mon[51670]: fsmap 2026-03-10T13:05:15.808 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:15 vm00 ceph-mon[51670]: osdmap e4: 0 total, 0 up, 0 in 2026-03-10T13:05:15.808 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:15 vm00 ceph-mon[51670]: mgrmap e13: y(active, since 22s) 2026-03-10T13:05:15.808 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:15 vm00 ceph-mon[51670]: overall HEALTH_OK 2026-03-10T13:05:15.808 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:15 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:15.808 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:15 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:15.808 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:15 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:15.808 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:15 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-10T13:05:15.808 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:15 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-10T13:05:15.808 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:15 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:05:15.884 INFO:teuthology.orchestra.run.vm08.stdout: 2026-03-10T13:05:15.884 INFO:teuthology.orchestra.run.vm08.stdout:{"epoch":3,"fsid":"98a3dada-1c81-11f1-89c9-d57c120f78d5","modified":"2026-03-10T13:05:10.450629Z","created":"2026-03-10T13:04:24.045109Z","min_mon_release":17,"min_mon_release_name":"quincy","election_strategy":1,"disallowed_leaders: ":"","stretch_mode":false,"tiebreaker_mon":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy"],"optional":[]},"mons":[{"rank":0,"name":"a","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:3300","nonce":0},{"type":"v1","addr":"192.168.123.100:6789","nonce":0}]},"addr":"192.168.123.100:6789/0","public_addr":"192.168.123.100:6789/0","priority":0,"weight":0,"crush_location":"{}"},{"rank":1,"name":"c","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:3301","nonce":0},{"type":"v1","addr":"192.168.123.100:6790","nonce":0}]},"addr":"192.168.123.100:6790/0","public_addr":"192.168.123.100:6790/0","priority":0,"weight":0,"crush_location":"{}"},{"rank":2,"name":"b","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:3300","nonce":0},{"type":"v1","addr":"192.168.123.108:6789","nonce":0}]},"addr":"192.168.123.108:6789/0","public_addr":"192.168.123.108:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0,1]} 2026-03-10T13:05:15.887 INFO:teuthology.orchestra.run.vm08.stderr:dumped monmap epoch 3 2026-03-10T13:05:15.965 INFO:tasks.cephadm:Generating final ceph.conf file... 
2026-03-10T13:05:15.965 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -- ceph config generate-minimal-conf 2026-03-10T13:05:16.552 INFO:teuthology.orchestra.run.vm00.stdout:# minimal ceph.conf for 98a3dada-1c81-11f1-89c9-d57c120f78d5 2026-03-10T13:05:16.552 INFO:teuthology.orchestra.run.vm00.stdout:[global] 2026-03-10T13:05:16.552 INFO:teuthology.orchestra.run.vm00.stdout: fsid = 98a3dada-1c81-11f1-89c9-d57c120f78d5 2026-03-10T13:05:16.552 INFO:teuthology.orchestra.run.vm00.stdout: mon_host = [v2:192.168.123.100:3300/0,v1:192.168.123.100:6789/0] [v2:192.168.123.108:3300/0,v1:192.168.123.108:6789/0] [v2:192.168.123.100:3301/0,v1:192.168.123.100:6790/0] 2026-03-10T13:05:16.627 INFO:tasks.cephadm:Distributing (final) config and client.admin keyring... 2026-03-10T13:05:16.627 DEBUG:teuthology.orchestra.run.vm00:> set -ex 2026-03-10T13:05:16.627 DEBUG:teuthology.orchestra.run.vm00:> sudo dd of=/etc/ceph/ceph.conf 2026-03-10T13:05:16.655 DEBUG:teuthology.orchestra.run.vm00:> set -ex 2026-03-10T13:05:16.655 DEBUG:teuthology.orchestra.run.vm00:> sudo dd of=/etc/ceph/ceph.client.admin.keyring 2026-03-10T13:05:16.740 DEBUG:teuthology.orchestra.run.vm08:> set -ex 2026-03-10T13:05:16.740 DEBUG:teuthology.orchestra.run.vm08:> sudo dd of=/etc/ceph/ceph.conf 2026-03-10T13:05:16.771 DEBUG:teuthology.orchestra.run.vm08:> set -ex 2026-03-10T13:05:16.772 DEBUG:teuthology.orchestra.run.vm08:> sudo dd of=/etc/ceph/ceph.client.admin.keyring 2026-03-10T13:05:16.797 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:16 vm00 ceph-mon[47364]: Reconfiguring mon.a (unknown last config time)... 2026-03-10T13:05:16.797 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:16 vm00 ceph-mon[47364]: Reconfiguring daemon mon.a on vm00 2026-03-10T13:05:16.797 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:16 vm00 ceph-mon[47364]: pgmap v5: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T13:05:16.797 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:16 vm00 ceph-mon[47364]: from='client.? 192.168.123.108:0/3759000776' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-10T13:05:16.797 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:16 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:16.797 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:16 vm00 ceph-mon[47364]: Reconfiguring mon.c (monmap changed)... 
2026-03-10T13:05:16.797 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:16 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-10T13:05:16.797 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:16 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-10T13:05:16.797 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:16 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:05:16.797 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:16 vm00 ceph-mon[47364]: Reconfiguring daemon mon.c on vm00 2026-03-10T13:05:16.797 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:16 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:16.798 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:16 vm00 ceph-mon[47364]: Reconfiguring mon.b (monmap changed)... 2026-03-10T13:05:16.798 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:16 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-10T13:05:16.798 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:16 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-10T13:05:16.798 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:16 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:05:16.798 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:16 vm00 ceph-mon[47364]: Reconfiguring daemon mon.b on vm08 2026-03-10T13:05:16.798 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:16 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T13:05:16.798 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:16 vm00 ceph-mon[51670]: Reconfiguring mon.a (unknown last config time)... 2026-03-10T13:05:16.798 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:16 vm00 ceph-mon[51670]: Reconfiguring daemon mon.a on vm00 2026-03-10T13:05:16.798 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:16 vm00 ceph-mon[51670]: pgmap v5: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T13:05:16.798 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:16 vm00 ceph-mon[51670]: from='client.? 192.168.123.108:0/3759000776' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-10T13:05:16.798 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:16 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:16.798 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:16 vm00 ceph-mon[51670]: Reconfiguring mon.c (monmap changed)... 
2026-03-10T13:05:16.798 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:16 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-10T13:05:16.798 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:16 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-10T13:05:16.798 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:16 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:05:16.798 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:16 vm00 ceph-mon[51670]: Reconfiguring daemon mon.c on vm00 2026-03-10T13:05:16.798 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:16 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:16.798 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:16 vm00 ceph-mon[51670]: Reconfiguring mon.b (monmap changed)... 2026-03-10T13:05:16.798 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:16 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-10T13:05:16.798 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:16 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-10T13:05:16.798 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:16 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:05:16.798 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:16 vm00 ceph-mon[51670]: Reconfiguring daemon mon.b on vm08 2026-03-10T13:05:16.798 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:16 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T13:05:16.847 INFO:tasks.cephadm:Adding mgr.y on vm00 2026-03-10T13:05:16.848 INFO:tasks.cephadm:Adding mgr.x on vm08 2026-03-10T13:05:16.848 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -- ceph orch apply mgr '2;vm00=y;vm08=x' 2026-03-10T13:05:17.411 INFO:teuthology.orchestra.run.vm08.stdout:Scheduled mgr update... 2026-03-10T13:05:17.495 DEBUG:teuthology.orchestra.run.vm08:mgr.x> sudo journalctl -f -n 0 -u ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@mgr.x.service 2026-03-10T13:05:17.497 INFO:tasks.cephadm:Deploying OSDs... 2026-03-10T13:05:17.497 DEBUG:teuthology.orchestra.run.vm00:> set -ex 2026-03-10T13:05:17.497 DEBUG:teuthology.orchestra.run.vm00:> dd if=/scratch_devs of=/dev/stdout 2026-03-10T13:05:17.514 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-10T13:05:17.514 DEBUG:teuthology.orchestra.run.vm00:> ls /dev/[sv]d? 
2026-03-10T13:05:17.570 INFO:teuthology.orchestra.run.vm00.stdout:/dev/vda 2026-03-10T13:05:17.570 INFO:teuthology.orchestra.run.vm00.stdout:/dev/vdb 2026-03-10T13:05:17.570 INFO:teuthology.orchestra.run.vm00.stdout:/dev/vdc 2026-03-10T13:05:17.570 INFO:teuthology.orchestra.run.vm00.stdout:/dev/vdd 2026-03-10T13:05:17.570 INFO:teuthology.orchestra.run.vm00.stdout:/dev/vde 2026-03-10T13:05:17.571 WARNING:teuthology.misc:Removing root device: /dev/vda from device list 2026-03-10T13:05:17.571 DEBUG:teuthology.misc:devs=['/dev/vdb', '/dev/vdc', '/dev/vdd', '/dev/vde'] 2026-03-10T13:05:17.571 DEBUG:teuthology.orchestra.run.vm00:> stat /dev/vdb 2026-03-10T13:05:17.628 INFO:teuthology.orchestra.run.vm00.stdout: File: /dev/vdb 2026-03-10T13:05:17.628 INFO:teuthology.orchestra.run.vm00.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-10T13:05:17.629 INFO:teuthology.orchestra.run.vm00.stdout:Device: 6h/6d Inode: 254 Links: 1 Device type: fc,10 2026-03-10T13:05:17.629 INFO:teuthology.orchestra.run.vm00.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-10T13:05:17.629 INFO:teuthology.orchestra.run.vm00.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-10T13:05:17.629 INFO:teuthology.orchestra.run.vm00.stdout:Access: 2026-03-10 13:04:57.144062900 +0000 2026-03-10T13:05:17.629 INFO:teuthology.orchestra.run.vm00.stdout:Modify: 2026-03-10 13:04:56.889062594 +0000 2026-03-10T13:05:17.629 INFO:teuthology.orchestra.run.vm00.stdout:Change: 2026-03-10 13:04:56.889062594 +0000 2026-03-10T13:05:17.629 INFO:teuthology.orchestra.run.vm00.stdout: Birth: 2026-03-10 13:01:00.317000000 +0000 2026-03-10T13:05:17.629 DEBUG:teuthology.orchestra.run.vm00:> sudo dd if=/dev/vdb of=/dev/null count=1 2026-03-10T13:05:17.697 INFO:teuthology.orchestra.run.vm00.stderr:1+0 records in 2026-03-10T13:05:17.697 INFO:teuthology.orchestra.run.vm00.stderr:1+0 records out 2026-03-10T13:05:17.697 INFO:teuthology.orchestra.run.vm00.stderr:512 bytes copied, 0.000130254 s, 3.9 MB/s 2026-03-10T13:05:17.698 DEBUG:teuthology.orchestra.run.vm00:> ! 
mount | grep -v devtmpfs | grep -q /dev/vdb 2026-03-10T13:05:17.757 DEBUG:teuthology.orchestra.run.vm00:> stat /dev/vdc 2026-03-10T13:05:17.815 INFO:teuthology.orchestra.run.vm00.stdout: File: /dev/vdc 2026-03-10T13:05:17.815 INFO:teuthology.orchestra.run.vm00.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-10T13:05:17.815 INFO:teuthology.orchestra.run.vm00.stdout:Device: 6h/6d Inode: 255 Links: 1 Device type: fc,20 2026-03-10T13:05:17.815 INFO:teuthology.orchestra.run.vm00.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-10T13:05:17.815 INFO:teuthology.orchestra.run.vm00.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-10T13:05:17.815 INFO:teuthology.orchestra.run.vm00.stdout:Access: 2026-03-10 13:04:57.211062980 +0000 2026-03-10T13:05:17.815 INFO:teuthology.orchestra.run.vm00.stdout:Modify: 2026-03-10 13:04:56.885062589 +0000 2026-03-10T13:05:17.815 INFO:teuthology.orchestra.run.vm00.stdout:Change: 2026-03-10 13:04:56.885062589 +0000 2026-03-10T13:05:17.815 INFO:teuthology.orchestra.run.vm00.stdout: Birth: 2026-03-10 13:01:00.325000000 +0000 2026-03-10T13:05:17.816 DEBUG:teuthology.orchestra.run.vm00:> sudo dd if=/dev/vdc of=/dev/null count=1 2026-03-10T13:05:17.880 INFO:teuthology.orchestra.run.vm00.stderr:1+0 records in 2026-03-10T13:05:17.880 INFO:teuthology.orchestra.run.vm00.stderr:1+0 records out 2026-03-10T13:05:17.880 INFO:teuthology.orchestra.run.vm00.stderr:512 bytes copied, 0.000148188 s, 3.5 MB/s 2026-03-10T13:05:17.882 DEBUG:teuthology.orchestra.run.vm00:> ! mount | grep -v devtmpfs | grep -q /dev/vdc 2026-03-10T13:05:17.939 DEBUG:teuthology.orchestra.run.vm00:> stat /dev/vdd 2026-03-10T13:05:18.001 INFO:teuthology.orchestra.run.vm00.stdout: File: /dev/vdd 2026-03-10T13:05:18.001 INFO:teuthology.orchestra.run.vm00.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-10T13:05:18.001 INFO:teuthology.orchestra.run.vm00.stdout:Device: 6h/6d Inode: 256 Links: 1 Device type: fc,30 2026-03-10T13:05:18.001 INFO:teuthology.orchestra.run.vm00.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-10T13:05:18.002 INFO:teuthology.orchestra.run.vm00.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-10T13:05:18.002 INFO:teuthology.orchestra.run.vm00.stdout:Access: 2026-03-10 13:04:57.284063068 +0000 2026-03-10T13:05:18.002 INFO:teuthology.orchestra.run.vm00.stdout:Modify: 2026-03-10 13:04:56.894062600 +0000 2026-03-10T13:05:18.002 INFO:teuthology.orchestra.run.vm00.stdout:Change: 2026-03-10 13:04:56.894062600 +0000 2026-03-10T13:05:18.002 INFO:teuthology.orchestra.run.vm00.stdout: Birth: 2026-03-10 13:01:00.337000000 +0000 2026-03-10T13:05:18.002 DEBUG:teuthology.orchestra.run.vm00:> sudo dd if=/dev/vdd of=/dev/null count=1 2026-03-10T13:05:18.072 INFO:teuthology.orchestra.run.vm00.stderr:1+0 records in 2026-03-10T13:05:18.073 INFO:teuthology.orchestra.run.vm00.stderr:1+0 records out 2026-03-10T13:05:18.073 INFO:teuthology.orchestra.run.vm00.stderr:512 bytes copied, 0.00013382 s, 3.8 MB/s 2026-03-10T13:05:18.074 DEBUG:teuthology.orchestra.run.vm00:> ! 
mount | grep -v devtmpfs | grep -q /dev/vdd 2026-03-10T13:05:18.132 DEBUG:teuthology.orchestra.run.vm00:> stat /dev/vde 2026-03-10T13:05:18.194 INFO:teuthology.orchestra.run.vm00.stdout: File: /dev/vde 2026-03-10T13:05:18.194 INFO:teuthology.orchestra.run.vm00.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-10T13:05:18.194 INFO:teuthology.orchestra.run.vm00.stdout:Device: 6h/6d Inode: 257 Links: 1 Device type: fc,40 2026-03-10T13:05:18.194 INFO:teuthology.orchestra.run.vm00.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-10T13:05:18.194 INFO:teuthology.orchestra.run.vm00.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-10T13:05:18.194 INFO:teuthology.orchestra.run.vm00.stdout:Access: 2026-03-10 13:04:57.360063159 +0000 2026-03-10T13:05:18.194 INFO:teuthology.orchestra.run.vm00.stdout:Modify: 2026-03-10 13:04:56.882062585 +0000 2026-03-10T13:05:18.194 INFO:teuthology.orchestra.run.vm00.stdout:Change: 2026-03-10 13:04:56.882062585 +0000 2026-03-10T13:05:18.194 INFO:teuthology.orchestra.run.vm00.stdout: Birth: 2026-03-10 13:01:00.340000000 +0000 2026-03-10T13:05:18.194 DEBUG:teuthology.orchestra.run.vm00:> sudo dd if=/dev/vde of=/dev/null count=1 2026-03-10T13:05:18.262 INFO:teuthology.orchestra.run.vm00.stderr:1+0 records in 2026-03-10T13:05:18.262 INFO:teuthology.orchestra.run.vm00.stderr:1+0 records out 2026-03-10T13:05:18.262 INFO:teuthology.orchestra.run.vm00.stderr:512 bytes copied, 0.000146815 s, 3.5 MB/s 2026-03-10T13:05:18.263 DEBUG:teuthology.orchestra.run.vm00:> ! mount | grep -v devtmpfs | grep -q /dev/vde 2026-03-10T13:05:18.270 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:05:18 vm08 systemd[1]: Starting Ceph mgr.x for 98a3dada-1c81-11f1-89c9-d57c120f78d5... 2026-03-10T13:05:18.323 DEBUG:teuthology.orchestra.run.vm08:> set -ex 2026-03-10T13:05:18.323 DEBUG:teuthology.orchestra.run.vm08:> dd if=/scratch_devs of=/dev/stdout 2026-03-10T13:05:18.345 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-10T13:05:18.345 DEBUG:teuthology.orchestra.run.vm08:> ls /dev/[sv]d? 
2026-03-10T13:05:18.413 INFO:teuthology.orchestra.run.vm08.stdout:/dev/vda 2026-03-10T13:05:18.413 INFO:teuthology.orchestra.run.vm08.stdout:/dev/vdb 2026-03-10T13:05:18.413 INFO:teuthology.orchestra.run.vm08.stdout:/dev/vdc 2026-03-10T13:05:18.413 INFO:teuthology.orchestra.run.vm08.stdout:/dev/vdd 2026-03-10T13:05:18.413 INFO:teuthology.orchestra.run.vm08.stdout:/dev/vde 2026-03-10T13:05:18.413 WARNING:teuthology.misc:Removing root device: /dev/vda from device list 2026-03-10T13:05:18.414 DEBUG:teuthology.misc:devs=['/dev/vdb', '/dev/vdc', '/dev/vdd', '/dev/vde'] 2026-03-10T13:05:18.414 DEBUG:teuthology.orchestra.run.vm08:> stat /dev/vdb 2026-03-10T13:05:18.525 INFO:teuthology.orchestra.run.vm08.stdout: File: /dev/vdb 2026-03-10T13:05:18.525 INFO:teuthology.orchestra.run.vm08.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-10T13:05:18.525 INFO:teuthology.orchestra.run.vm08.stdout:Device: 6h/6d Inode: 254 Links: 1 Device type: fc,10 2026-03-10T13:05:18.525 INFO:teuthology.orchestra.run.vm08.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-10T13:05:18.525 INFO:teuthology.orchestra.run.vm08.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-10T13:05:18.525 INFO:teuthology.orchestra.run.vm08.stdout:Access: 2026-03-10 13:05:02.742604128 +0000 2026-03-10T13:05:18.525 INFO:teuthology.orchestra.run.vm08.stdout:Modify: 2026-03-10 13:05:02.384603680 +0000 2026-03-10T13:05:18.525 INFO:teuthology.orchestra.run.vm08.stdout:Change: 2026-03-10 13:05:02.384603680 +0000 2026-03-10T13:05:18.525 INFO:teuthology.orchestra.run.vm08.stdout: Birth: 2026-03-10 13:00:29.295000000 +0000 2026-03-10T13:05:18.526 DEBUG:teuthology.orchestra.run.vm08:> sudo dd if=/dev/vdb of=/dev/null count=1 2026-03-10T13:05:18.531 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:05:18 vm08 podman[50826]: 2026-03-10 13:05:18.266966586 +0000 UTC m=+0.019773345 container create b15e662f34e160f41224209b46471e84f25e053744e7f95d3513a83dfd9d6adc (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x, build-date=2022-05-03T08:36:31.336870, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, io.buildah.version=1.19.8, distribution-scope=public, maintainer=Guillaume Abrioux , RELEASE=HEAD, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, io.openshift.tags=base centos centos-stream, vendor=Red Hat, Inc., ceph=True, version=8, name=centos-stream, io.openshift.expose-services=, GIT_BRANCH=HEAD, io.k8s.display-name=CentOS Stream 8, com.redhat.component=centos-stream-container, CEPH_POINT_RELEASE=-17.2.0, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, vcs-type=git, GIT_CLEAN=True, GIT_REPO=https://github.com/ceph/ceph-container.git, release=754, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, architecture=x86_64) 2026-03-10T13:05:18.531 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:05:18 vm08 podman[50826]: 2026-03-10 13:05:18.335945038 +0000 UTC m=+0.088751786 container init b15e662f34e160f41224209b46471e84f25e053744e7f95d3513a83dfd9d6adc (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, vcs-type=git, io.openshift.tags=base centos centos-stream, GIT_REPO=https://github.com/ceph/ceph-container.git, GIT_BRANCH=HEAD, com.redhat.component=centos-stream-container, maintainer=Guillaume Abrioux , version=8, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, distribution-scope=public, RELEASE=HEAD, release=754, ceph=True, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, name=centos-stream, io.buildah.version=1.19.8, io.openshift.expose-services=, GIT_CLEAN=True, io.k8s.display-name=CentOS Stream 8, CEPH_POINT_RELEASE=-17.2.0, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, architecture=x86_64, build-date=2022-05-03T08:36:31.336870, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, vendor=Red Hat, Inc., com.redhat.license_terms=https://centos.org/legal/licensing-policy/, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac) 2026-03-10T13:05:18.531 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:05:18 vm08 podman[50826]: 2026-03-10 13:05:18.339509628 +0000 UTC m=+0.092316387 container start b15e662f34e160f41224209b46471e84f25e053744e7f95d3513a83dfd9d6adc (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x, GIT_REPO=https://github.com/ceph/ceph-container.git, com.redhat.component=centos-stream-container, maintainer=Guillaume Abrioux , vendor=Red Hat, Inc., CEPH_POINT_RELEASE=-17.2.0, RELEASE=HEAD, release=754, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, distribution-scope=public, GIT_BRANCH=HEAD, io.k8s.display-name=CentOS Stream 8, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, io.buildah.version=1.19.8, version=8, architecture=x86_64, build-date=2022-05-03T08:36:31.336870, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. 
This image takes the Red Hat UBI and layers on content from CentOS Stream, io.openshift.expose-services=, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., ceph=True, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, name=centos-stream, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, vcs-type=git, io.openshift.tags=base centos centos-stream, GIT_CLEAN=True) 2026-03-10T13:05:18.531 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:05:18 vm08 bash[50826]: b15e662f34e160f41224209b46471e84f25e053744e7f95d3513a83dfd9d6adc 2026-03-10T13:05:18.532 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:05:18 vm08 podman[50826]: 2026-03-10 13:05:18.258899946 +0000 UTC m=+0.011706715 image pull e1d6a67b021eb077ee22bf650f1a9fb1980a2cf5c36bdb9cba9eac6de8f702d9 quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a 2026-03-10T13:05:18.532 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:05:18 vm08 systemd[1]: Started Ceph mgr.x for 98a3dada-1c81-11f1-89c9-d57c120f78d5. 2026-03-10T13:05:18.532 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:05:18 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[50837]: 2026-03-10T13:05:18.472+0000 7f329fe02000 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member 2026-03-10T13:05:18.549 INFO:teuthology.orchestra.run.vm08.stderr:1+0 records in 2026-03-10T13:05:18.549 INFO:teuthology.orchestra.run.vm08.stderr:1+0 records out 2026-03-10T13:05:18.549 INFO:teuthology.orchestra.run.vm08.stderr:512 bytes copied, 0.000266092 s, 1.9 MB/s 2026-03-10T13:05:18.550 DEBUG:teuthology.orchestra.run.vm08:> ! mount | grep -v devtmpfs | grep -q /dev/vdb 2026-03-10T13:05:18.647 DEBUG:teuthology.orchestra.run.vm08:> stat /dev/vdc 2026-03-10T13:05:18.706 INFO:teuthology.orchestra.run.vm08.stdout: File: /dev/vdc 2026-03-10T13:05:18.706 INFO:teuthology.orchestra.run.vm08.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-10T13:05:18.706 INFO:teuthology.orchestra.run.vm08.stdout:Device: 6h/6d Inode: 255 Links: 1 Device type: fc,20 2026-03-10T13:05:18.706 INFO:teuthology.orchestra.run.vm08.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-10T13:05:18.706 INFO:teuthology.orchestra.run.vm08.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-10T13:05:18.706 INFO:teuthology.orchestra.run.vm08.stdout:Access: 2026-03-10 13:05:02.805604207 +0000 2026-03-10T13:05:18.706 INFO:teuthology.orchestra.run.vm08.stdout:Modify: 2026-03-10 13:05:02.406603708 +0000 2026-03-10T13:05:18.706 INFO:teuthology.orchestra.run.vm08.stdout:Change: 2026-03-10 13:05:02.406603708 +0000 2026-03-10T13:05:18.706 INFO:teuthology.orchestra.run.vm08.stdout: Birth: 2026-03-10 13:00:29.302000000 +0000 2026-03-10T13:05:18.706 DEBUG:teuthology.orchestra.run.vm08:> sudo dd if=/dev/vdc of=/dev/null count=1 2026-03-10T13:05:18.749 INFO:teuthology.orchestra.run.vm08.stderr:1+0 records in 2026-03-10T13:05:18.749 INFO:teuthology.orchestra.run.vm08.stderr:1+0 records out 2026-03-10T13:05:18.749 INFO:teuthology.orchestra.run.vm08.stderr:512 bytes copied, 0.000704921 s, 726 kB/s 2026-03-10T13:05:18.751 DEBUG:teuthology.orchestra.run.vm08:> ! 
mount | grep -v devtmpfs | grep -q /dev/vdc 2026-03-10T13:05:18.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:18 vm00 ceph-mon[47364]: mon.b calling monitor election 2026-03-10T13:05:18.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:18 vm00 ceph-mon[47364]: from='client.14208 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mgr", "placement": "2;vm00=y;vm08=x", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:05:18.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:18 vm00 ceph-mon[47364]: Saving service mgr spec with placement vm00=y;vm08=x;count:2 2026-03-10T13:05:18.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:18 vm00 ceph-mon[47364]: Deploying daemon mgr.x on vm08 2026-03-10T13:05:18.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:18 vm00 ceph-mon[47364]: mon.b calling monitor election 2026-03-10T13:05:18.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:18 vm00 ceph-mon[47364]: mon.c calling monitor election 2026-03-10T13:05:18.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:18 vm00 ceph-mon[47364]: mon.a calling monitor election 2026-03-10T13:05:18.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:18 vm00 ceph-mon[47364]: mon.a is new leader, mons a,c,b in quorum (ranks 0,1,2) 2026-03-10T13:05:18.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:18 vm00 ceph-mon[47364]: monmap e3: 3 mons at {a=[v2:192.168.123.100:3300/0,v1:192.168.123.100:6789/0],b=[v2:192.168.123.108:3300/0,v1:192.168.123.108:6789/0],c=[v2:192.168.123.100:3301/0,v1:192.168.123.100:6790/0]} 2026-03-10T13:05:18.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:18 vm00 ceph-mon[47364]: fsmap 2026-03-10T13:05:18.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:18 vm00 ceph-mon[47364]: osdmap e4: 0 total, 0 up, 0 in 2026-03-10T13:05:18.754 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:18 vm00 ceph-mon[47364]: mgrmap e13: y(active, since 24s) 2026-03-10T13:05:18.754 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:18 vm00 ceph-mon[47364]: overall HEALTH_OK 2026-03-10T13:05:18.754 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:18 vm00 ceph-mon[47364]: pgmap v6: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T13:05:18.754 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:18 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:18.754 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:18 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:05:18.754 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:18 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:05:18.754 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:18 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:05:18.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:18 vm00 ceph-mon[51670]: mon.b calling monitor election 2026-03-10T13:05:18.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:18 vm00 ceph-mon[51670]: from='client.14208 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mgr", "placement": "2;vm00=y;vm08=x", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:05:18.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:18 vm00 
ceph-mon[51670]: Saving service mgr spec with placement vm00=y;vm08=x;count:2 2026-03-10T13:05:18.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:18 vm00 ceph-mon[51670]: Deploying daemon mgr.x on vm08 2026-03-10T13:05:18.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:18 vm00 ceph-mon[51670]: mon.b calling monitor election 2026-03-10T13:05:18.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:18 vm00 ceph-mon[51670]: mon.c calling monitor election 2026-03-10T13:05:18.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:18 vm00 ceph-mon[51670]: mon.a calling monitor election 2026-03-10T13:05:18.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:18 vm00 ceph-mon[51670]: mon.a is new leader, mons a,c,b in quorum (ranks 0,1,2) 2026-03-10T13:05:18.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:18 vm00 ceph-mon[51670]: monmap e3: 3 mons at {a=[v2:192.168.123.100:3300/0,v1:192.168.123.100:6789/0],b=[v2:192.168.123.108:3300/0,v1:192.168.123.108:6789/0],c=[v2:192.168.123.100:3301/0,v1:192.168.123.100:6790/0]} 2026-03-10T13:05:18.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:18 vm00 ceph-mon[51670]: fsmap 2026-03-10T13:05:18.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:18 vm00 ceph-mon[51670]: osdmap e4: 0 total, 0 up, 0 in 2026-03-10T13:05:18.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:18 vm00 ceph-mon[51670]: mgrmap e13: y(active, since 24s) 2026-03-10T13:05:18.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:18 vm00 ceph-mon[51670]: overall HEALTH_OK 2026-03-10T13:05:18.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:18 vm00 ceph-mon[51670]: pgmap v6: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T13:05:18.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:18 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:18.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:18 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:05:18.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:18 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:05:18.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:18 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:05:18.820 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:05:18 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[50837]: 2026-03-10T13:05:18.539+0000 7f329fe02000 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member 2026-03-10T13:05:18.827 DEBUG:teuthology.orchestra.run.vm08:> stat /dev/vdd 2026-03-10T13:05:18.847 INFO:teuthology.orchestra.run.vm08.stdout: File: /dev/vdd 2026-03-10T13:05:18.847 INFO:teuthology.orchestra.run.vm08.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-10T13:05:18.847 INFO:teuthology.orchestra.run.vm08.stdout:Device: 6h/6d Inode: 256 Links: 1 Device type: fc,30 2026-03-10T13:05:18.847 INFO:teuthology.orchestra.run.vm08.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-10T13:05:18.847 INFO:teuthology.orchestra.run.vm08.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-10T13:05:18.847 INFO:teuthology.orchestra.run.vm08.stdout:Access: 2026-03-10 13:05:02.878604298 +0000 2026-03-10T13:05:18.847 
INFO:teuthology.orchestra.run.vm08.stdout:Modify: 2026-03-10 13:05:02.399603699 +0000 2026-03-10T13:05:18.847 INFO:teuthology.orchestra.run.vm08.stdout:Change: 2026-03-10 13:05:02.399603699 +0000 2026-03-10T13:05:18.847 INFO:teuthology.orchestra.run.vm08.stdout: Birth: 2026-03-10 13:00:29.309000000 +0000 2026-03-10T13:05:18.847 DEBUG:teuthology.orchestra.run.vm08:> sudo dd if=/dev/vdd of=/dev/null count=1 2026-03-10T13:05:18.943 INFO:teuthology.orchestra.run.vm08.stderr:1+0 records in 2026-03-10T13:05:18.943 INFO:teuthology.orchestra.run.vm08.stderr:1+0 records out 2026-03-10T13:05:18.943 INFO:teuthology.orchestra.run.vm08.stderr:512 bytes copied, 0.000232948 s, 2.2 MB/s 2026-03-10T13:05:18.947 DEBUG:teuthology.orchestra.run.vm08:> ! mount | grep -v devtmpfs | grep -q /dev/vdd 2026-03-10T13:05:19.036 DEBUG:teuthology.orchestra.run.vm08:> stat /dev/vde 2026-03-10T13:05:19.057 INFO:teuthology.orchestra.run.vm08.stdout: File: /dev/vde 2026-03-10T13:05:19.057 INFO:teuthology.orchestra.run.vm08.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-10T13:05:19.057 INFO:teuthology.orchestra.run.vm08.stdout:Device: 6h/6d Inode: 257 Links: 1 Device type: fc,40 2026-03-10T13:05:19.057 INFO:teuthology.orchestra.run.vm08.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-10T13:05:19.057 INFO:teuthology.orchestra.run.vm08.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-10T13:05:19.057 INFO:teuthology.orchestra.run.vm08.stdout:Access: 2026-03-10 13:05:02.999604450 +0000 2026-03-10T13:05:19.057 INFO:teuthology.orchestra.run.vm08.stdout:Modify: 2026-03-10 13:05:02.398603698 +0000 2026-03-10T13:05:19.057 INFO:teuthology.orchestra.run.vm08.stdout:Change: 2026-03-10 13:05:02.398603698 +0000 2026-03-10T13:05:19.057 INFO:teuthology.orchestra.run.vm08.stdout: Birth: 2026-03-10 13:00:29.361000000 +0000 2026-03-10T13:05:19.057 DEBUG:teuthology.orchestra.run.vm08:> sudo dd if=/dev/vde of=/dev/null count=1 2026-03-10T13:05:19.125 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:05:19 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[50837]: 2026-03-10T13:05:19.020+0000 7f329fe02000 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-10T13:05:19.128 INFO:teuthology.orchestra.run.vm08.stderr:1+0 records in 2026-03-10T13:05:19.129 INFO:teuthology.orchestra.run.vm08.stderr:1+0 records out 2026-03-10T13:05:19.129 INFO:teuthology.orchestra.run.vm08.stderr:512 bytes copied, 0.000279046 s, 1.8 MB/s 2026-03-10T13:05:19.130 DEBUG:teuthology.orchestra.run.vm08:> ! mount | grep -v devtmpfs | grep -q /dev/vde 2026-03-10T13:05:19.193 INFO:tasks.cephadm:Deploying osd.0 on vm00 with /dev/vde... 
2026-03-10T13:05:19.193 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -- lvm zap /dev/vde 2026-03-10T13:05:19.598 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:19 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T13:05:19.598 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:19 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:19.598 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:19 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:19.598 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:19 vm00 ceph-mon[47364]: Reconfiguring mgr.y (unknown last config time)... 2026-03-10T13:05:19.598 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:19 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-10T13:05:19.598 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:19 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-10T13:05:19.598 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:19 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:05:19.598 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:19 vm00 ceph-mon[47364]: Reconfiguring daemon mgr.y on vm00 2026-03-10T13:05:19.598 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:19 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:19.598 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:19 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:05:19.598 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:19 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:05:19.598 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:19 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:05:19.598 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:19 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:19.599 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:19 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T13:05:19.599 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:19 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:19.599 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:19 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:19.599 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:19 vm00 ceph-mon[51670]: Reconfiguring mgr.y (unknown last config time)... 
2026-03-10T13:05:19.599 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:19 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-10T13:05:19.599 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:19 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-10T13:05:19.599 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:19 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:05:19.599 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:19 vm00 ceph-mon[51670]: Reconfiguring daemon mgr.y on vm00 2026-03-10T13:05:19.599 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:19 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:19.599 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:19 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:05:19.599 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:19 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:05:19.599 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:19 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:05:19.599 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:19 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:19.772 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:05:19 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[50837]: 2026-03-10T13:05:19.429+0000 7f329fe02000 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member 2026-03-10T13:05:19.772 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:05:19 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[50837]: 2026-03-10T13:05:19.599+0000 7f329fe02000 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member 2026-03-10T13:05:19.772 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:05:19 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[50837]: 2026-03-10T13:05:19.658+0000 7f329fe02000 -1 mgr[py] Module influx has missing NOTIFY_TYPES member 2026-03-10T13:05:19.938 INFO:teuthology.orchestra.run.vm00.stdout: 2026-03-10T13:05:19.963 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -- ceph orch daemon add osd vm00:/dev/vde 2026-03-10T13:05:20.272 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:05:19 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[50837]: 2026-03-10T13:05:19.852+0000 7f329fe02000 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member 2026-03-10T13:05:20.732 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:05:20 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[50837]: 2026-03-10T13:05:20.507+0000 7f329fe02000 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-10T13:05:20.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:20 vm00 ceph-mon[47364]: pgmap v7: 0 pgs: ; 0 B data, 0 B 
used, 0 B / 0 B avail 2026-03-10T13:05:20.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:20 vm00 ceph-mon[51670]: pgmap v7: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T13:05:21.005 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:05:20 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[50837]: 2026-03-10T13:05:20.728+0000 7f329fe02000 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-10T13:05:21.005 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:05:20 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[50837]: 2026-03-10T13:05:20.793+0000 7f329fe02000 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 2026-03-10T13:05:21.005 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:05:20 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[50837]: 2026-03-10T13:05:20.861+0000 7f329fe02000 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-10T13:05:21.005 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:05:20 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[50837]: 2026-03-10T13:05:20.932+0000 7f329fe02000 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 2026-03-10T13:05:21.005 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:05:21 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[50837]: 2026-03-10T13:05:21.001+0000 7f329fe02000 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-10T13:05:21.754 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:21 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:21.754 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:21 vm00 ceph-mon[47364]: from='client.24100 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm00:/dev/vde", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:05:21.754 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:21 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T13:05:21.754 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:21 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-10T13:05:21.754 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:21 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:05:21.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:21 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:21.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:21 vm00 ceph-mon[51670]: from='client.24100 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm00:/dev/vde", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:05:21.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:21 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T13:05:21.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:21 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-10T13:05:21.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:21 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' 
entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:05:21.772 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:05:21 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[50837]: 2026-03-10T13:05:21.350+0000 7f329fe02000 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member 2026-03-10T13:05:21.772 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:05:21 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[50837]: 2026-03-10T13:05:21.435+0000 7f329fe02000 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-10T13:05:21.772 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:21 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:05:22.368 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:05:22 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[50837]: 2026-03-10T13:05:22.064+0000 7f329fe02000 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-10T13:05:22.368 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:05:22 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[50837]: 2026-03-10T13:05:22.133+0000 7f329fe02000 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member 2026-03-10T13:05:22.368 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:05:22 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[50837]: 2026-03-10T13:05:22.210+0000 7f329fe02000 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-10T13:05:22.623 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:05:22 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[50837]: 2026-03-10T13:05:22.364+0000 7f329fe02000 -1 mgr[py] Module status has missing NOTIFY_TYPES member 2026-03-10T13:05:22.623 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:05:22 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[50837]: 2026-03-10T13:05:22.425+0000 7f329fe02000 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 2026-03-10T13:05:22.623 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:05:22 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[50837]: 2026-03-10T13:05:22.526+0000 7f329fe02000 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member 2026-03-10T13:05:22.624 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:22 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/4075234332' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "33741dbc-5269-4c43-97b4-ac057d7a2041"}]: dispatch 2026-03-10T13:05:22.624 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:22 vm08 ceph-mon[49535]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "33741dbc-5269-4c43-97b4-ac057d7a2041"}]: dispatch 2026-03-10T13:05:22.624 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:22 vm08 ceph-mon[49535]: from='client.? 
' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "33741dbc-5269-4c43-97b4-ac057d7a2041"}]': finished 2026-03-10T13:05:22.624 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:22 vm08 ceph-mon[49535]: osdmap e5: 1 total, 0 up, 1 in 2026-03-10T13:05:22.624 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:22 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T13:05:22.624 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:22 vm08 ceph-mon[49535]: pgmap v9: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T13:05:22.624 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:22 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/3182235372' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T13:05:22.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:22 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/4075234332' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "33741dbc-5269-4c43-97b4-ac057d7a2041"}]: dispatch 2026-03-10T13:05:22.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:22 vm00 ceph-mon[47364]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "33741dbc-5269-4c43-97b4-ac057d7a2041"}]: dispatch 2026-03-10T13:05:22.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:22 vm00 ceph-mon[47364]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "33741dbc-5269-4c43-97b4-ac057d7a2041"}]': finished 2026-03-10T13:05:22.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:22 vm00 ceph-mon[47364]: osdmap e5: 1 total, 0 up, 1 in 2026-03-10T13:05:22.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:22 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T13:05:22.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:22 vm00 ceph-mon[47364]: pgmap v9: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T13:05:22.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:22 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/3182235372' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T13:05:22.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:22 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/4075234332' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "33741dbc-5269-4c43-97b4-ac057d7a2041"}]: dispatch 2026-03-10T13:05:22.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:22 vm00 ceph-mon[51670]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "33741dbc-5269-4c43-97b4-ac057d7a2041"}]: dispatch 2026-03-10T13:05:22.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:22 vm00 ceph-mon[51670]: from='client.? 
' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "33741dbc-5269-4c43-97b4-ac057d7a2041"}]': finished 2026-03-10T13:05:22.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:22 vm00 ceph-mon[51670]: osdmap e5: 1 total, 0 up, 1 in 2026-03-10T13:05:22.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:22 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T13:05:22.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:22 vm00 ceph-mon[51670]: pgmap v9: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T13:05:22.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:22 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/3182235372' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T13:05:22.973 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:05:22 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[50837]: 2026-03-10T13:05:22.619+0000 7f329fe02000 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member 2026-03-10T13:05:23.272 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:05:22 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[50837]: 2026-03-10T13:05:22.969+0000 7f329fe02000 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-10T13:05:23.272 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:05:23 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[50837]: 2026-03-10T13:05:23.032+0000 7f329fe02000 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-10T13:05:24.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:23 vm00 ceph-mon[47364]: Standby manager daemon x started 2026-03-10T13:05:24.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:23 vm00 ceph-mon[47364]: from='mgr.? 192.168.123.108:0/1798379658' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-10T13:05:24.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:23 vm00 ceph-mon[47364]: from='mgr.? 192.168.123.108:0/1798379658' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-10T13:05:24.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:23 vm00 ceph-mon[47364]: from='mgr.? 192.168.123.108:0/1798379658' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-10T13:05:24.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:23 vm00 ceph-mon[47364]: from='mgr.? 192.168.123.108:0/1798379658' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-10T13:05:24.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:24 vm00 ceph-mon[51670]: Standby manager daemon x started 2026-03-10T13:05:24.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:24 vm00 ceph-mon[51670]: from='mgr.? 192.168.123.108:0/1798379658' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-10T13:05:24.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:24 vm00 ceph-mon[51670]: from='mgr.? 192.168.123.108:0/1798379658' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-10T13:05:24.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:24 vm00 ceph-mon[51670]: from='mgr.? 
192.168.123.108:0/1798379658' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-10T13:05:24.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:24 vm00 ceph-mon[51670]: from='mgr.? 192.168.123.108:0/1798379658' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-10T13:05:24.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:24 vm08 ceph-mon[49535]: Standby manager daemon x started 2026-03-10T13:05:24.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:24 vm08 ceph-mon[49535]: from='mgr.? 192.168.123.108:0/1798379658' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-10T13:05:24.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:24 vm08 ceph-mon[49535]: from='mgr.? 192.168.123.108:0/1798379658' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-10T13:05:24.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:24 vm08 ceph-mon[49535]: from='mgr.? 192.168.123.108:0/1798379658' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-10T13:05:24.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:24 vm08 ceph-mon[49535]: from='mgr.? 192.168.123.108:0/1798379658' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-10T13:05:25.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:24 vm00 ceph-mon[47364]: pgmap v10: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T13:05:25.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:24 vm00 ceph-mon[47364]: mgrmap e14: y(active, since 31s), standbys: x 2026-03-10T13:05:25.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:24 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-10T13:05:25.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:24 vm00 ceph-mon[51670]: pgmap v10: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T13:05:25.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:24 vm00 ceph-mon[51670]: mgrmap e14: y(active, since 31s), standbys: x 2026-03-10T13:05:25.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:24 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-10T13:05:25.272 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:24 vm08 ceph-mon[49535]: pgmap v10: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T13:05:25.272 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:24 vm08 ceph-mon[49535]: mgrmap e14: y(active, since 31s), standbys: x 2026-03-10T13:05:25.272 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:24 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-10T13:05:25.930 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:25 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch 2026-03-10T13:05:25.930 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:25 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:05:25.930 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:25 vm00 ceph-mon[47364]: 
from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch 2026-03-10T13:05:25.930 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:25 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:05:26.272 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:25 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch 2026-03-10T13:05:26.272 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:25 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:05:27.156 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:26 vm00 ceph-mon[47364]: pgmap v11: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T13:05:27.156 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:26 vm00 ceph-mon[47364]: Deploying daemon osd.0 on vm00 2026-03-10T13:05:27.156 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:26 vm00 ceph-mon[51670]: pgmap v11: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T13:05:27.156 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:26 vm00 ceph-mon[51670]: Deploying daemon osd.0 on vm00 2026-03-10T13:05:27.272 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:26 vm08 ceph-mon[49535]: pgmap v11: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T13:05:27.272 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:26 vm08 ceph-mon[49535]: Deploying daemon osd.0 on vm00 2026-03-10T13:05:28.323 INFO:teuthology.orchestra.run.vm00.stdout:Created osd(s) 0 on host 'vm00' 2026-03-10T13:05:28.431 DEBUG:teuthology.orchestra.run.vm00:osd.0> sudo journalctl -f -n 0 -u ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@osd.0.service 2026-03-10T13:05:28.433 INFO:tasks.cephadm:Deploying osd.1 on vm00 with /dev/vdd... 
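The cycle that just completed for osd.0, and that repeats below for osd.1 (/dev/vdd) and osd.2 (/dev/vdc), is the per-device deployment the cephadm task drives: zap the device with ceph-volume, then add the OSD through the orchestrator, then wait for "Created osd(s) N". A hedged shell sketch assembled from the DEBUG command lines in this log (the deploy_osd function and the loop over devices are illustrative; the image, fsid, and paths are taken verbatim from the commands above):

deploy_osd() {    # illustrative; each command appears verbatim in the run log
  local host=$1 dev=$2
  sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 \
      ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring \
      --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -- lvm zap "$dev"
  sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 \
      shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring \
      --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -- ceph orch daemon add osd "$host:$dev"
}
deploy_osd vm00 /dev/vde   # osd.0 (completed above)
deploy_osd vm00 /dev/vdd   # osd.1
deploy_osd vm00 /dev/vdc   # osd.2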
2026-03-10T13:05:28.433 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -- lvm zap /dev/vdd 2026-03-10T13:05:28.521 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:28 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:28.521 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:28 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:05:28.521 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:28 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:05:28.521 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:28 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:05:28.521 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:28 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:28.521 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:28 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:28.521 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:28 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:05:28.521 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:28 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:05:28.521 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:28 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:05:28.521 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:28 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:28.521 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:28 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:28.521 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:28 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:05:28.521 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:28 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:05:28.521 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:28 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:05:28.521 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:28 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:28.521 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:28 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:28.521 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:28 vm00 ceph-mon[51670]: from='mgr.14152 
192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:05:28.521 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:28 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:05:28.521 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:28 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:05:28.522 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:28 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:28.772 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:28 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:28.773 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:28 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:05:28.773 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:28 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:05:28.773 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:28 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:05:28.773 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:28 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:28.773 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:28 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:28.773 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:28 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:05:28.773 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:28 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:05:28.773 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:28 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:05:28.773 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:28 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:28.795 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 13:05:28 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-0[54704]: 2026-03-10T13:05:28.527+0000 7fdaebf8d3c0 -1 osd.0 0 log_to_monitors true 2026-03-10T13:05:29.525 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:29 vm00 ceph-mon[47364]: pgmap v12: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T13:05:29.525 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:29 vm00 ceph-mon[47364]: from='osd.0 [v2:192.168.123.100:6802/771461294,v1:192.168.123.100:6803/771461294]' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch 2026-03-10T13:05:29.525 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:29 vm00 ceph-mon[47364]: from='osd.0 ' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", 
"ids": ["0"]}]: dispatch 2026-03-10T13:05:29.783 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:29 vm00 ceph-mon[51670]: pgmap v12: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T13:05:29.783 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:29 vm00 ceph-mon[51670]: from='osd.0 [v2:192.168.123.100:6802/771461294,v1:192.168.123.100:6803/771461294]' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch 2026-03-10T13:05:29.783 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:29 vm00 ceph-mon[51670]: from='osd.0 ' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch 2026-03-10T13:05:30.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:29 vm08 ceph-mon[49535]: pgmap v12: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T13:05:30.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:29 vm08 ceph-mon[49535]: from='osd.0 [v2:192.168.123.100:6802/771461294,v1:192.168.123.100:6803/771461294]' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch 2026-03-10T13:05:30.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:29 vm08 ceph-mon[49535]: from='osd.0 ' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch 2026-03-10T13:05:30.192 INFO:teuthology.orchestra.run.vm00.stdout: 2026-03-10T13:05:30.207 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -- ceph orch daemon add osd vm00:/dev/vdd 2026-03-10T13:05:30.695 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:30 vm00 ceph-mon[47364]: pgmap v13: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T13:05:30.695 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:30 vm00 ceph-mon[47364]: from='osd.0 ' entity='osd.0' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]': finished 2026-03-10T13:05:30.695 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:30 vm00 ceph-mon[47364]: osdmap e6: 1 total, 0 up, 1 in 2026-03-10T13:05:30.695 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:30 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T13:05:30.695 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:30 vm00 ceph-mon[47364]: from='osd.0 [v2:192.168.123.100:6802/771461294,v1:192.168.123.100:6803/771461294]' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch 2026-03-10T13:05:30.695 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:30 vm00 ceph-mon[47364]: from='osd.0 ' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch 2026-03-10T13:05:30.695 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:30 vm00 ceph-mon[47364]: Detected new or changed devices on vm00 2026-03-10T13:05:30.695 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:30 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:30.695 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:30 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 
2026-03-10T13:05:30.695 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:30 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:30.695 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:30 vm00 ceph-mon[51670]: pgmap v13: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T13:05:30.695 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:30 vm00 ceph-mon[51670]: from='osd.0 ' entity='osd.0' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]': finished 2026-03-10T13:05:30.695 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:30 vm00 ceph-mon[51670]: osdmap e6: 1 total, 0 up, 1 in 2026-03-10T13:05:30.695 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:30 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T13:05:30.695 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:30 vm00 ceph-mon[51670]: from='osd.0 [v2:192.168.123.100:6802/771461294,v1:192.168.123.100:6803/771461294]' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch 2026-03-10T13:05:30.695 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:30 vm00 ceph-mon[51670]: from='osd.0 ' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch 2026-03-10T13:05:30.695 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:30 vm00 ceph-mon[51670]: Detected new or changed devices on vm00 2026-03-10T13:05:30.696 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:30 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:30.696 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:30 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:05:30.696 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:30 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:31.003 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 13:05:30 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-0[54704]: 2026-03-10T13:05:30.690+0000 7fdae2990700 -1 osd.0 0 waiting for initial osdmap 2026-03-10T13:05:31.004 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 13:05:30 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-0[54704]: 2026-03-10T13:05:30.700+0000 7fdadd327700 -1 osd.0 7 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-10T13:05:31.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:30 vm08 ceph-mon[49535]: pgmap v13: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T13:05:31.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:30 vm08 ceph-mon[49535]: from='osd.0 ' entity='osd.0' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]': finished 2026-03-10T13:05:31.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:30 vm08 ceph-mon[49535]: osdmap e6: 1 total, 0 up, 1 in 2026-03-10T13:05:31.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:30 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T13:05:31.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:30 vm08 ceph-mon[49535]: from='osd.0 
[v2:192.168.123.100:6802/771461294,v1:192.168.123.100:6803/771461294]' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch 2026-03-10T13:05:31.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:30 vm08 ceph-mon[49535]: from='osd.0 ' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch 2026-03-10T13:05:31.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:30 vm08 ceph-mon[49535]: Detected new or changed devices on vm00 2026-03-10T13:05:31.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:30 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:31.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:30 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:05:31.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:30 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:32.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:31 vm00 ceph-mon[47364]: purged_snaps scrub starts 2026-03-10T13:05:32.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:31 vm00 ceph-mon[47364]: purged_snaps scrub ok 2026-03-10T13:05:32.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:31 vm00 ceph-mon[47364]: from='osd.0 ' entity='osd.0' cmd='[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm00", "root=default"]}]': finished 2026-03-10T13:05:32.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:31 vm00 ceph-mon[47364]: osdmap e7: 1 total, 0 up, 1 in 2026-03-10T13:05:32.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:31 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T13:05:32.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:31 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T13:05:32.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:31 vm00 ceph-mon[47364]: from='client.24124 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm00:/dev/vdd", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:05:32.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:31 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T13:05:32.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:31 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-10T13:05:32.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:31 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:05:32.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:31 vm00 ceph-mon[47364]: from='client.? 
192.168.123.100:0/2782838248' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "31dc7b09-f48f-4ec2-8ad6-69f3b68a5138"}]: dispatch 2026-03-10T13:05:32.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:31 vm00 ceph-mon[47364]: osd.0 [v2:192.168.123.100:6802/771461294,v1:192.168.123.100:6803/771461294] boot 2026-03-10T13:05:32.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:31 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/2782838248' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "31dc7b09-f48f-4ec2-8ad6-69f3b68a5138"}]': finished 2026-03-10T13:05:32.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:31 vm00 ceph-mon[47364]: osdmap e8: 2 total, 1 up, 2 in 2026-03-10T13:05:32.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:31 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T13:05:32.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:31 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T13:05:32.004 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:31 vm00 ceph-mon[51670]: purged_snaps scrub starts 2026-03-10T13:05:32.004 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:31 vm00 ceph-mon[51670]: purged_snaps scrub ok 2026-03-10T13:05:32.004 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:31 vm00 ceph-mon[51670]: from='osd.0 ' entity='osd.0' cmd='[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm00", "root=default"]}]': finished 2026-03-10T13:05:32.004 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:31 vm00 ceph-mon[51670]: osdmap e7: 1 total, 0 up, 1 in 2026-03-10T13:05:32.004 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:31 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T13:05:32.004 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:31 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T13:05:32.004 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:31 vm00 ceph-mon[51670]: from='client.24124 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm00:/dev/vdd", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:05:32.004 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:31 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T13:05:32.004 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:31 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-10T13:05:32.004 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:31 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:05:32.004 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:31 vm00 ceph-mon[51670]: from='client.? 
192.168.123.100:0/2782838248' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "31dc7b09-f48f-4ec2-8ad6-69f3b68a5138"}]: dispatch 2026-03-10T13:05:32.004 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:31 vm00 ceph-mon[51670]: osd.0 [v2:192.168.123.100:6802/771461294,v1:192.168.123.100:6803/771461294] boot 2026-03-10T13:05:32.004 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:31 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/2782838248' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "31dc7b09-f48f-4ec2-8ad6-69f3b68a5138"}]': finished 2026-03-10T13:05:32.004 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:31 vm00 ceph-mon[51670]: osdmap e8: 2 total, 1 up, 2 in 2026-03-10T13:05:32.004 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:31 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T13:05:32.004 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:31 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T13:05:32.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:31 vm08 ceph-mon[49535]: purged_snaps scrub starts 2026-03-10T13:05:32.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:31 vm08 ceph-mon[49535]: purged_snaps scrub ok 2026-03-10T13:05:32.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:31 vm08 ceph-mon[49535]: from='osd.0 ' entity='osd.0' cmd='[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm00", "root=default"]}]': finished 2026-03-10T13:05:32.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:31 vm08 ceph-mon[49535]: osdmap e7: 1 total, 0 up, 1 in 2026-03-10T13:05:32.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:31 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T13:05:32.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:31 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T13:05:32.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:31 vm08 ceph-mon[49535]: from='client.24124 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm00:/dev/vdd", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:05:32.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:31 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T13:05:32.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:31 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-10T13:05:32.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:31 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:05:32.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:31 vm08 ceph-mon[49535]: from='client.? 
192.168.123.100:0/2782838248' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "31dc7b09-f48f-4ec2-8ad6-69f3b68a5138"}]: dispatch 2026-03-10T13:05:32.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:31 vm08 ceph-mon[49535]: osd.0 [v2:192.168.123.100:6802/771461294,v1:192.168.123.100:6803/771461294] boot 2026-03-10T13:05:32.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:31 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/2782838248' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "31dc7b09-f48f-4ec2-8ad6-69f3b68a5138"}]': finished 2026-03-10T13:05:32.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:31 vm08 ceph-mon[49535]: osdmap e8: 2 total, 1 up, 2 in 2026-03-10T13:05:32.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:31 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T13:05:32.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:31 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T13:05:33.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:32 vm00 ceph-mon[47364]: pgmap v16: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T13:05:33.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:32 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/2344114551' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T13:05:33.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:32 vm00 ceph-mon[51670]: pgmap v16: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T13:05:33.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:32 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/2344114551' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T13:05:33.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:32 vm08 ceph-mon[49535]: pgmap v16: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T13:05:33.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:32 vm08 ceph-mon[49535]: from='client.? 
192.168.123.100:0/2344114551' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T13:05:34.944 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:34 vm00 ceph-mon[47364]: pgmap v18: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail 2026-03-10T13:05:34.944 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:34 vm00 ceph-mon[51670]: pgmap v18: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail 2026-03-10T13:05:35.272 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:34 vm08 ceph-mon[49535]: pgmap v18: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail 2026-03-10T13:05:35.897 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:35 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch 2026-03-10T13:05:35.897 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:35 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:05:35.897 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:35 vm00 ceph-mon[47364]: Deploying daemon osd.1 on vm00 2026-03-10T13:05:35.897 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:35 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch 2026-03-10T13:05:35.897 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:35 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:05:35.897 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:35 vm00 ceph-mon[51670]: Deploying daemon osd.1 on vm00 2026-03-10T13:05:36.272 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:35 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch 2026-03-10T13:05:36.272 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:35 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:05:36.272 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:35 vm08 ceph-mon[49535]: Deploying daemon osd.1 on vm00 2026-03-10T13:05:37.004 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:36 vm00 ceph-mon[51670]: pgmap v19: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail 2026-03-10T13:05:37.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:36 vm00 ceph-mon[47364]: pgmap v19: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail 2026-03-10T13:05:37.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:36 vm08 ceph-mon[49535]: pgmap v19: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail 2026-03-10T13:05:38.710 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:38 vm00 ceph-mon[51670]: pgmap v20: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail 2026-03-10T13:05:38.710 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:38 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:05:38.710 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:38 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:05:38.710 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:38 vm00 ceph-mon[51670]: from='mgr.14152 
192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:05:38.710 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:38 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:38.710 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:38 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:38.710 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:38 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:38.710 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:38 vm00 ceph-mon[47364]: pgmap v20: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail 2026-03-10T13:05:38.711 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:38 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:05:38.711 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:38 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:05:38.711 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:38 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:05:38.711 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:38 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:38.711 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:38 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:38.711 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:38 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:38.747 INFO:teuthology.orchestra.run.vm00.stdout:Created osd(s) 1 on host 'vm00' 2026-03-10T13:05:38.827 DEBUG:teuthology.orchestra.run.vm00:osd.1> sudo journalctl -f -n 0 -u ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@osd.1.service 2026-03-10T13:05:38.830 INFO:tasks.cephadm:Deploying osd.2 on vm00 with /dev/vdc... 
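After each "Created osd(s) N" message, teuthology attaches a log follower to the new daemon's systemd unit, which is why journalctl@ceph.osd.N lines start interleaving into this output. A sketch of that follower, assuming the unit-name pattern shown in the DEBUG lines above:

# stream only new entries for the freshly created OSD unit
sudo journalctl -f -n 0 -u ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@osd.1.service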
2026-03-10T13:05:38.830 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -- lvm zap /dev/vdc 2026-03-10T13:05:39.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:38 vm08 ceph-mon[49535]: pgmap v20: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail 2026-03-10T13:05:39.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:38 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:05:39.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:38 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:05:39.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:38 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:05:39.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:38 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:39.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:38 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:39.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:38 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:39.310 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 13:05:39 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-1[57427]: 2026-03-10T13:05:39.201+0000 7f546a6ea3c0 -1 osd.1 0 log_to_monitors true 2026-03-10T13:05:39.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:39 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:39.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:39 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:05:39.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:39 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:05:39.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:39 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:05:39.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:39 vm00 ceph-mon[51670]: from='osd.1 [v2:192.168.123.100:6810/3864481161,v1:192.168.123.100:6811/3864481161]' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch 2026-03-10T13:05:39.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:39 vm00 ceph-mon[51670]: from='osd.1 ' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch 2026-03-10T13:05:39.755 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:39 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:39.755 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:39 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:05:40.003 
INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:39 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:05:40.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:39 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:05:40.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:39 vm00 ceph-mon[47364]: from='osd.1 [v2:192.168.123.100:6810/3864481161,v1:192.168.123.100:6811/3864481161]' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch 2026-03-10T13:05:40.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:39 vm00 ceph-mon[47364]: from='osd.1 ' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch 2026-03-10T13:05:40.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:39 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:40.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:39 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:05:40.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:39 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:05:40.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:39 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:05:40.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:39 vm08 ceph-mon[49535]: from='osd.1 [v2:192.168.123.100:6810/3864481161,v1:192.168.123.100:6811/3864481161]' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch 2026-03-10T13:05:40.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:39 vm08 ceph-mon[49535]: from='osd.1 ' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch 2026-03-10T13:05:40.628 INFO:teuthology.orchestra.run.vm00.stdout: 2026-03-10T13:05:40.643 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -- ceph orch daemon add osd vm00:/dev/vdc 2026-03-10T13:05:40.792 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 13:05:40 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-1[57427]: 2026-03-10T13:05:40.770+0000 7f54610ed700 -1 osd.1 0 waiting for initial osdmap 2026-03-10T13:05:40.792 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 13:05:40 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-1[57427]: 2026-03-10T13:05:40.788+0000 7f545b283700 -1 osd.1 10 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-10T13:05:41.076 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:40 vm00 ceph-mon[47364]: pgmap v21: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail 2026-03-10T13:05:41.076 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:40 vm00 ceph-mon[47364]: from='osd.1 ' entity='osd.1' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]': finished 2026-03-10T13:05:41.076 
INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:40 vm00 ceph-mon[47364]: osdmap e9: 2 total, 1 up, 2 in 2026-03-10T13:05:41.076 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:40 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T13:05:41.076 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:40 vm00 ceph-mon[47364]: from='osd.1 [v2:192.168.123.100:6810/3864481161,v1:192.168.123.100:6811/3864481161]' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch 2026-03-10T13:05:41.076 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:40 vm00 ceph-mon[47364]: from='osd.1 ' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch 2026-03-10T13:05:41.076 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:40 vm00 ceph-mon[47364]: Detected new or changed devices on vm00 2026-03-10T13:05:41.076 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:40 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:41.076 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:40 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:05:41.076 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:40 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:41.077 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:40 vm00 ceph-mon[51670]: pgmap v21: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail 2026-03-10T13:05:41.077 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:40 vm00 ceph-mon[51670]: from='osd.1 ' entity='osd.1' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]': finished 2026-03-10T13:05:41.077 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:40 vm00 ceph-mon[51670]: osdmap e9: 2 total, 1 up, 2 in 2026-03-10T13:05:41.077 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:40 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T13:05:41.077 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:40 vm00 ceph-mon[51670]: from='osd.1 [v2:192.168.123.100:6810/3864481161,v1:192.168.123.100:6811/3864481161]' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch 2026-03-10T13:05:41.077 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:40 vm00 ceph-mon[51670]: from='osd.1 ' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch 2026-03-10T13:05:41.077 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:40 vm00 ceph-mon[51670]: Detected new or changed devices on vm00 2026-03-10T13:05:41.077 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:40 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:41.077 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:40 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:05:41.077 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:40 
vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:41.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:40 vm08 ceph-mon[49535]: pgmap v21: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail 2026-03-10T13:05:41.272 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:40 vm08 ceph-mon[49535]: from='osd.1 ' entity='osd.1' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]': finished 2026-03-10T13:05:41.272 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:40 vm08 ceph-mon[49535]: osdmap e9: 2 total, 1 up, 2 in 2026-03-10T13:05:41.272 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:40 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T13:05:41.272 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:40 vm08 ceph-mon[49535]: from='osd.1 [v2:192.168.123.100:6810/3864481161,v1:192.168.123.100:6811/3864481161]' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch 2026-03-10T13:05:41.272 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:40 vm08 ceph-mon[49535]: from='osd.1 ' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch 2026-03-10T13:05:41.272 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:40 vm08 ceph-mon[49535]: Detected new or changed devices on vm00 2026-03-10T13:05:41.272 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:40 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:41.272 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:40 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:05:41.272 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:40 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:42.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:41 vm00 ceph-mon[47364]: from='osd.1 ' entity='osd.1' cmd='[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm00", "root=default"]}]': finished 2026-03-10T13:05:42.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:41 vm00 ceph-mon[47364]: osdmap e10: 2 total, 1 up, 2 in 2026-03-10T13:05:42.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:41 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T13:05:42.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:41 vm00 ceph-mon[47364]: from='client.14259 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm00:/dev/vdc", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:05:42.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:41 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T13:05:42.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:41 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-10T13:05:42.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:41 vm00 ceph-mon[47364]: from='mgr.14152 
192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:05:42.005 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:41 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T13:05:42.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:41 vm00 ceph-mon[51670]: from='osd.1 ' entity='osd.1' cmd='[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm00", "root=default"]}]': finished 2026-03-10T13:05:42.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:41 vm00 ceph-mon[51670]: osdmap e10: 2 total, 1 up, 2 in 2026-03-10T13:05:42.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:41 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T13:05:42.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:41 vm00 ceph-mon[51670]: from='client.14259 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm00:/dev/vdc", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:05:42.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:41 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T13:05:42.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:41 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-10T13:05:42.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:41 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:05:42.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:41 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T13:05:42.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:41 vm08 ceph-mon[49535]: from='osd.1 ' entity='osd.1' cmd='[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm00", "root=default"]}]': finished 2026-03-10T13:05:42.272 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:41 vm08 ceph-mon[49535]: osdmap e10: 2 total, 1 up, 2 in 2026-03-10T13:05:42.272 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:41 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T13:05:42.272 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:41 vm08 ceph-mon[49535]: from='client.14259 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm00:/dev/vdc", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:05:42.272 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:41 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T13:05:42.272 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:41 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-10T13:05:42.272 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:41 vm08 ceph-mon[49535]: from='mgr.14152 
192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:05:42.272 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:41 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T13:05:43.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:42 vm00 ceph-mon[47364]: purged_snaps scrub starts 2026-03-10T13:05:43.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:42 vm00 ceph-mon[47364]: purged_snaps scrub ok 2026-03-10T13:05:43.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:42 vm00 ceph-mon[47364]: pgmap v24: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail 2026-03-10T13:05:43.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:42 vm00 ceph-mon[47364]: osd.1 [v2:192.168.123.100:6810/3864481161,v1:192.168.123.100:6811/3864481161] boot 2026-03-10T13:05:43.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:42 vm00 ceph-mon[47364]: osdmap e11: 2 total, 2 up, 2 in 2026-03-10T13:05:43.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:42 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T13:05:43.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:42 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/3879907787' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "f9f7ad09-367f-410b-9921-f31c456c313d"}]: dispatch 2026-03-10T13:05:43.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:42 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/3879907787' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "f9f7ad09-367f-410b-9921-f31c456c313d"}]': finished 2026-03-10T13:05:43.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:42 vm00 ceph-mon[47364]: osdmap e12: 3 total, 2 up, 3 in 2026-03-10T13:05:43.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:42 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T13:05:43.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:42 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/1932598221' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T13:05:43.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:42 vm00 ceph-mon[51670]: purged_snaps scrub starts 2026-03-10T13:05:43.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:42 vm00 ceph-mon[51670]: purged_snaps scrub ok 2026-03-10T13:05:43.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:42 vm00 ceph-mon[51670]: pgmap v24: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail 2026-03-10T13:05:43.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:42 vm00 ceph-mon[51670]: osd.1 [v2:192.168.123.100:6810/3864481161,v1:192.168.123.100:6811/3864481161] boot 2026-03-10T13:05:43.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:42 vm00 ceph-mon[51670]: osdmap e11: 2 total, 2 up, 2 in 2026-03-10T13:05:43.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:42 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T13:05:43.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:42 vm00 ceph-mon[51670]: from='client.? 
192.168.123.100:0/3879907787' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "f9f7ad09-367f-410b-9921-f31c456c313d"}]: dispatch 2026-03-10T13:05:43.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:42 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/3879907787' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "f9f7ad09-367f-410b-9921-f31c456c313d"}]': finished 2026-03-10T13:05:43.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:42 vm00 ceph-mon[51670]: osdmap e12: 3 total, 2 up, 3 in 2026-03-10T13:05:43.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:42 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T13:05:43.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:42 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/1932598221' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T13:05:43.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:42 vm08 ceph-mon[49535]: purged_snaps scrub starts 2026-03-10T13:05:43.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:42 vm08 ceph-mon[49535]: purged_snaps scrub ok 2026-03-10T13:05:43.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:42 vm08 ceph-mon[49535]: pgmap v24: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail 2026-03-10T13:05:43.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:42 vm08 ceph-mon[49535]: osd.1 [v2:192.168.123.100:6810/3864481161,v1:192.168.123.100:6811/3864481161] boot 2026-03-10T13:05:43.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:42 vm08 ceph-mon[49535]: osdmap e11: 2 total, 2 up, 2 in 2026-03-10T13:05:43.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:42 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T13:05:43.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:42 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/3879907787' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "f9f7ad09-367f-410b-9921-f31c456c313d"}]: dispatch 2026-03-10T13:05:43.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:42 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/3879907787' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "f9f7ad09-367f-410b-9921-f31c456c313d"}]': finished 2026-03-10T13:05:43.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:42 vm08 ceph-mon[49535]: osdmap e12: 3 total, 2 up, 3 in 2026-03-10T13:05:43.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:42 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T13:05:43.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:42 vm08 ceph-mon[49535]: from='client.? 
192.168.123.100:0/1932598221' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T13:05:45.185 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:44 vm00 ceph-mon[47364]: pgmap v27: 0 pgs: ; 0 B data, 9.6 MiB used, 40 GiB / 40 GiB avail 2026-03-10T13:05:45.185 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:44 vm00 ceph-mon[51670]: pgmap v27: 0 pgs: ; 0 B data, 9.6 MiB used, 40 GiB / 40 GiB avail 2026-03-10T13:05:45.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:44 vm08 ceph-mon[49535]: pgmap v27: 0 pgs: ; 0 B data, 9.6 MiB used, 40 GiB / 40 GiB avail 2026-03-10T13:05:45.950 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:45 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.2"}]: dispatch 2026-03-10T13:05:45.950 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:45 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:05:45.950 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:45 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.2"}]: dispatch 2026-03-10T13:05:45.950 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:45 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:05:46.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:45 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.2"}]: dispatch 2026-03-10T13:05:46.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:45 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:05:46.949 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:46 vm00 ceph-mon[47364]: pgmap v28: 0 pgs: ; 0 B data, 9.6 MiB used, 40 GiB / 40 GiB avail 2026-03-10T13:05:46.949 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:46 vm00 ceph-mon[47364]: Deploying daemon osd.2 on vm00 2026-03-10T13:05:46.949 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:46 vm00 ceph-mon[51670]: pgmap v28: 0 pgs: ; 0 B data, 9.6 MiB used, 40 GiB / 40 GiB avail 2026-03-10T13:05:46.949 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:46 vm00 ceph-mon[51670]: Deploying daemon osd.2 on vm00 2026-03-10T13:05:47.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:46 vm08 ceph-mon[49535]: pgmap v28: 0 pgs: ; 0 B data, 9.6 MiB used, 40 GiB / 40 GiB avail 2026-03-10T13:05:47.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:46 vm08 ceph-mon[49535]: Deploying daemon osd.2 on vm00 2026-03-10T13:05:48.918 INFO:teuthology.orchestra.run.vm00.stdout:Created osd(s) 2 on host 'vm00' 2026-03-10T13:05:49.019 DEBUG:teuthology.orchestra.run.vm00:osd.2> sudo journalctl -f -n 0 -u ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@osd.2.service 2026-03-10T13:05:49.021 INFO:tasks.cephadm:Deploying osd.3 on vm00 with /dev/vdb... 
2026-03-10T13:05:49.021 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -- lvm zap /dev/vdb 2026-03-10T13:05:49.033 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:48 vm00 ceph-mon[47364]: pgmap v29: 0 pgs: ; 0 B data, 9.7 MiB used, 40 GiB / 40 GiB avail 2026-03-10T13:05:49.033 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:48 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:49.033 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:48 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:05:49.034 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:48 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:05:49.034 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:48 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:05:49.034 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:48 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:49.034 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:48 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:49.034 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:48 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:49.034 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:48 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:05:49.034 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:48 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:05:49.034 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:48 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:05:49.034 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:48 vm00 ceph-mon[51670]: pgmap v29: 0 pgs: ; 0 B data, 9.7 MiB used, 40 GiB / 40 GiB avail 2026-03-10T13:05:49.034 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:48 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:49.034 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:48 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:05:49.034 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:48 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:05:49.034 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:48 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:05:49.034 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:48 vm00 ceph-mon[51670]: from='mgr.14152 
192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:49.034 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:48 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:49.034 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:48 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:49.034 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:48 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:05:49.034 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:48 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:05:49.034 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:48 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:05:49.272 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:48 vm08 ceph-mon[49535]: pgmap v29: 0 pgs: ; 0 B data, 9.7 MiB used, 40 GiB / 40 GiB avail 2026-03-10T13:05:49.272 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:48 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:49.272 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:48 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:05:49.272 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:48 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:05:49.272 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:48 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:05:49.272 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:48 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:49.272 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:48 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:49.272 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:48 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:49.272 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:48 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:05:49.272 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:48 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:05:49.272 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:48 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:05:49.800 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 13:05:49 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-2[60179]: 2026-03-10T13:05:49.545+0000 7fb3d7ab53c0 -1 osd.2 0 log_to_monitors true 2026-03-10T13:05:49.842 INFO:teuthology.orchestra.run.vm00.stdout: 2026-03-10T13:05:49.860 DEBUG:teuthology.orchestra.run.vm00:> sudo 
/home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -- ceph orch daemon add osd vm00:/dev/vdb 2026-03-10T13:05:50.062 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:49 vm00 ceph-mon[47364]: from='osd.2 [v2:192.168.123.100:6818/4074751068,v1:192.168.123.100:6819/4074751068]' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch 2026-03-10T13:05:50.062 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:49 vm00 ceph-mon[51670]: from='osd.2 [v2:192.168.123.100:6818/4074751068,v1:192.168.123.100:6819/4074751068]' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch 2026-03-10T13:05:50.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:49 vm08 ceph-mon[49535]: from='osd.2 [v2:192.168.123.100:6818/4074751068,v1:192.168.123.100:6819/4074751068]' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch 2026-03-10T13:05:51.101 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:50 vm00 ceph-mon[51670]: pgmap v30: 0 pgs: ; 0 B data, 9.7 MiB used, 40 GiB / 40 GiB avail 2026-03-10T13:05:51.101 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:50 vm00 ceph-mon[51670]: from='osd.2 [v2:192.168.123.100:6818/4074751068,v1:192.168.123.100:6819/4074751068]' entity='osd.2' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]': finished 2026-03-10T13:05:51.101 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:50 vm00 ceph-mon[51670]: osdmap e13: 3 total, 2 up, 3 in 2026-03-10T13:05:51.101 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:50 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T13:05:51.101 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:50 vm00 ceph-mon[51670]: from='osd.2 [v2:192.168.123.100:6818/4074751068,v1:192.168.123.100:6819/4074751068]' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch 2026-03-10T13:05:51.101 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:50 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T13:05:51.101 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:50 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-10T13:05:51.101 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:50 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:05:51.101 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 13:05:50 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-2[60179]: 2026-03-10T13:05:50.974+0000 7fb3ce4b8700 -1 osd.2 0 waiting for initial osdmap 2026-03-10T13:05:51.101 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 13:05:51 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-2[60179]: 2026-03-10T13:05:51.000+0000 7fb3c864e700 -1 osd.2 14 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-10T13:05:51.101 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:50 vm00 ceph-mon[47364]: pgmap v30: 0 pgs: ; 0 B data, 9.7 
MiB used, 40 GiB / 40 GiB avail 2026-03-10T13:05:51.101 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:50 vm00 ceph-mon[47364]: from='osd.2 [v2:192.168.123.100:6818/4074751068,v1:192.168.123.100:6819/4074751068]' entity='osd.2' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]': finished 2026-03-10T13:05:51.101 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:50 vm00 ceph-mon[47364]: osdmap e13: 3 total, 2 up, 3 in 2026-03-10T13:05:51.101 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:50 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T13:05:51.101 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:50 vm00 ceph-mon[47364]: from='osd.2 [v2:192.168.123.100:6818/4074751068,v1:192.168.123.100:6819/4074751068]' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch 2026-03-10T13:05:51.101 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:50 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T13:05:51.101 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:50 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-10T13:05:51.101 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:50 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:05:51.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:50 vm08 ceph-mon[49535]: pgmap v30: 0 pgs: ; 0 B data, 9.7 MiB used, 40 GiB / 40 GiB avail 2026-03-10T13:05:51.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:50 vm08 ceph-mon[49535]: from='osd.2 [v2:192.168.123.100:6818/4074751068,v1:192.168.123.100:6819/4074751068]' entity='osd.2' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]': finished 2026-03-10T13:05:51.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:50 vm08 ceph-mon[49535]: osdmap e13: 3 total, 2 up, 3 in 2026-03-10T13:05:51.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:50 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T13:05:51.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:50 vm08 ceph-mon[49535]: from='osd.2 [v2:192.168.123.100:6818/4074751068,v1:192.168.123.100:6819/4074751068]' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch 2026-03-10T13:05:51.272 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:50 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T13:05:51.272 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:50 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-10T13:05:51.272 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:50 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:05:52.255 
INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:51 vm00 ceph-mon[47364]: from='client.14283 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm00:/dev/vdb", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:05:52.255 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:51 vm00 ceph-mon[47364]: from='osd.2 [v2:192.168.123.100:6818/4074751068,v1:192.168.123.100:6819/4074751068]' entity='osd.2' cmd='[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm00", "root=default"]}]': finished 2026-03-10T13:05:52.255 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:51 vm00 ceph-mon[47364]: osdmap e14: 3 total, 2 up, 3 in 2026-03-10T13:05:52.255 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:51 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T13:05:52.255 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:51 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T13:05:52.255 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:51 vm00 ceph-mon[47364]: Detected new or changed devices on vm00 2026-03-10T13:05:52.255 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:51 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:52.255 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:51 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:05:52.255 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:51 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:52.255 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:51 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/2141655073' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "36dd1fdb-2d5f-4be6-b549-9bcc7e503439"}]: dispatch 2026-03-10T13:05:52.255 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:51 vm00 ceph-mon[47364]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "36dd1fdb-2d5f-4be6-b549-9bcc7e503439"}]: dispatch 2026-03-10T13:05:52.255 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:51 vm00 ceph-mon[47364]: osd.2 [v2:192.168.123.100:6818/4074751068,v1:192.168.123.100:6819/4074751068] boot 2026-03-10T13:05:52.255 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:51 vm00 ceph-mon[47364]: from='client.? 
' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "36dd1fdb-2d5f-4be6-b549-9bcc7e503439"}]': finished 2026-03-10T13:05:52.255 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:51 vm00 ceph-mon[47364]: osdmap e15: 4 total, 3 up, 4 in 2026-03-10T13:05:52.255 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:51 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T13:05:52.255 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:51 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T13:05:52.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:51 vm00 ceph-mon[51670]: from='client.14283 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm00:/dev/vdb", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:05:52.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:51 vm00 ceph-mon[51670]: from='osd.2 [v2:192.168.123.100:6818/4074751068,v1:192.168.123.100:6819/4074751068]' entity='osd.2' cmd='[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm00", "root=default"]}]': finished 2026-03-10T13:05:52.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:51 vm00 ceph-mon[51670]: osdmap e14: 3 total, 2 up, 3 in 2026-03-10T13:05:52.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:51 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T13:05:52.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:51 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T13:05:52.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:51 vm00 ceph-mon[51670]: Detected new or changed devices on vm00 2026-03-10T13:05:52.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:51 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:52.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:51 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:05:52.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:51 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:52.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:51 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/2141655073' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "36dd1fdb-2d5f-4be6-b549-9bcc7e503439"}]: dispatch 2026-03-10T13:05:52.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:51 vm00 ceph-mon[51670]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "36dd1fdb-2d5f-4be6-b549-9bcc7e503439"}]: dispatch 2026-03-10T13:05:52.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:51 vm00 ceph-mon[51670]: osd.2 [v2:192.168.123.100:6818/4074751068,v1:192.168.123.100:6819/4074751068] boot 2026-03-10T13:05:52.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:51 vm00 ceph-mon[51670]: from='client.? 
' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "36dd1fdb-2d5f-4be6-b549-9bcc7e503439"}]': finished 2026-03-10T13:05:52.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:51 vm00 ceph-mon[51670]: osdmap e15: 4 total, 3 up, 4 in 2026-03-10T13:05:52.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:51 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T13:05:52.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:51 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T13:05:52.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:51 vm08 ceph-mon[49535]: from='client.14283 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm00:/dev/vdb", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:05:52.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:51 vm08 ceph-mon[49535]: from='osd.2 [v2:192.168.123.100:6818/4074751068,v1:192.168.123.100:6819/4074751068]' entity='osd.2' cmd='[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm00", "root=default"]}]': finished 2026-03-10T13:05:52.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:51 vm08 ceph-mon[49535]: osdmap e14: 3 total, 2 up, 3 in 2026-03-10T13:05:52.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:51 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T13:05:52.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:51 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T13:05:52.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:51 vm08 ceph-mon[49535]: Detected new or changed devices on vm00 2026-03-10T13:05:52.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:51 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:52.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:51 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:05:52.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:51 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:52.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:51 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/2141655073' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "36dd1fdb-2d5f-4be6-b549-9bcc7e503439"}]: dispatch 2026-03-10T13:05:52.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:51 vm08 ceph-mon[49535]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "36dd1fdb-2d5f-4be6-b549-9bcc7e503439"}]: dispatch 2026-03-10T13:05:52.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:51 vm08 ceph-mon[49535]: osd.2 [v2:192.168.123.100:6818/4074751068,v1:192.168.123.100:6819/4074751068] boot 2026-03-10T13:05:52.272 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:51 vm08 ceph-mon[49535]: from='client.? 
' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "36dd1fdb-2d5f-4be6-b549-9bcc7e503439"}]': finished 2026-03-10T13:05:52.272 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:51 vm08 ceph-mon[49535]: osdmap e15: 4 total, 3 up, 4 in 2026-03-10T13:05:52.272 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:51 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T13:05:52.272 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:51 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T13:05:53.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:53 vm00 ceph-mon[47364]: purged_snaps scrub starts 2026-03-10T13:05:53.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:53 vm00 ceph-mon[47364]: purged_snaps scrub ok 2026-03-10T13:05:53.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:53 vm00 ceph-mon[47364]: pgmap v33: 0 pgs: ; 0 B data, 9.7 MiB used, 40 GiB / 40 GiB avail 2026-03-10T13:05:53.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:53 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/1164406470' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T13:05:53.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:53 vm00 ceph-mon[51670]: purged_snaps scrub starts 2026-03-10T13:05:53.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:53 vm00 ceph-mon[51670]: purged_snaps scrub ok 2026-03-10T13:05:53.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:53 vm00 ceph-mon[51670]: pgmap v33: 0 pgs: ; 0 B data, 9.7 MiB used, 40 GiB / 40 GiB avail 2026-03-10T13:05:53.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:53 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/1164406470' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T13:05:53.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:53 vm08 ceph-mon[49535]: purged_snaps scrub starts 2026-03-10T13:05:53.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:53 vm08 ceph-mon[49535]: purged_snaps scrub ok 2026-03-10T13:05:53.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:53 vm08 ceph-mon[49535]: pgmap v33: 0 pgs: ; 0 B data, 9.7 MiB used, 40 GiB / 40 GiB avail 2026-03-10T13:05:53.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:53 vm08 ceph-mon[49535]: from='client.? 
192.168.123.100:0/1164406470' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T13:05:54.443 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:54 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T13:05:54.443 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:54 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32}]: dispatch 2026-03-10T13:05:54.443 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:54 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T13:05:54.443 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:54 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T13:05:54.443 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:54 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32}]: dispatch 2026-03-10T13:05:54.444 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:54 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T13:05:54.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:54 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T13:05:54.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:54 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32}]: dispatch 2026-03-10T13:05:54.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:54 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T13:05:55.408 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:55 vm00 ceph-mon[47364]: pgmap v35: 0 pgs: ; 0 B data, 15 MiB used, 60 GiB / 60 GiB avail 2026-03-10T13:05:55.408 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:55 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd='[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32}]': finished 2026-03-10T13:05:55.408 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:55 vm00 ceph-mon[47364]: osdmap e16: 4 total, 3 up, 4 in 2026-03-10T13:05:55.408 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:55 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T13:05:55.408 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:55 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd pool application 
enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]: dispatch 2026-03-10T13:05:55.408 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:55 vm00 ceph-mon[51670]: pgmap v35: 0 pgs: ; 0 B data, 15 MiB used, 60 GiB / 60 GiB avail 2026-03-10T13:05:55.408 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:55 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd='[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32}]': finished 2026-03-10T13:05:55.408 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:55 vm00 ceph-mon[51670]: osdmap e16: 4 total, 3 up, 4 in 2026-03-10T13:05:55.408 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:55 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T13:05:55.408 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:55 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]: dispatch 2026-03-10T13:05:55.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:55 vm08 ceph-mon[49535]: pgmap v35: 0 pgs: ; 0 B data, 15 MiB used, 60 GiB / 60 GiB avail 2026-03-10T13:05:55.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:55 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd='[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32}]': finished 2026-03-10T13:05:55.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:55 vm08 ceph-mon[49535]: osdmap e16: 4 total, 3 up, 4 in 2026-03-10T13:05:55.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:55 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T13:05:55.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:55 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]: dispatch 2026-03-10T13:05:56.627 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:56 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd='[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]': finished 2026-03-10T13:05:56.627 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:56 vm00 ceph-mon[47364]: osdmap e17: 4 total, 3 up, 4 in 2026-03-10T13:05:56.627 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:56 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T13:05:56.627 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:56 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.3"}]: dispatch 2026-03-10T13:05:56.627 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:56 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:05:56.627 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:56 vm00 ceph-mon[47364]: Deploying daemon osd.3 on vm00 2026-03-10T13:05:56.627 
INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:56 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd='[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]': finished 2026-03-10T13:05:56.628 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:56 vm00 ceph-mon[51670]: osdmap e17: 4 total, 3 up, 4 in 2026-03-10T13:05:56.628 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:56 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T13:05:56.628 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:56 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.3"}]: dispatch 2026-03-10T13:05:56.628 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:56 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:05:56.628 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:56 vm00 ceph-mon[51670]: Deploying daemon osd.3 on vm00 2026-03-10T13:05:56.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:56 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd='[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]': finished 2026-03-10T13:05:56.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:56 vm08 ceph-mon[49535]: osdmap e17: 4 total, 3 up, 4 in 2026-03-10T13:05:56.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:56 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T13:05:56.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:56 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.3"}]: dispatch 2026-03-10T13:05:56.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:56 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:05:56.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:56 vm08 ceph-mon[49535]: Deploying daemon osd.3 on vm00 2026-03-10T13:05:57.491 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:57 vm00 ceph-mon[47364]: pgmap v38: 1 pgs: 1 unknown; 0 B data, 15 MiB used, 60 GiB / 60 GiB avail 2026-03-10T13:05:57.491 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:57 vm00 ceph-mon[47364]: osdmap e18: 4 total, 3 up, 4 in 2026-03-10T13:05:57.491 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:57 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T13:05:57.491 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:57 vm00 ceph-mon[51670]: pgmap v38: 1 pgs: 1 unknown; 0 B data, 15 MiB used, 60 GiB / 60 GiB avail 2026-03-10T13:05:57.491 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:57 vm00 ceph-mon[51670]: osdmap e18: 4 total, 3 up, 4 in 2026-03-10T13:05:57.491 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:57 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T13:05:57.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:57 vm08 
ceph-mon[49535]: pgmap v38: 1 pgs: 1 unknown; 0 B data, 15 MiB used, 60 GiB / 60 GiB avail 2026-03-10T13:05:57.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:57 vm08 ceph-mon[49535]: osdmap e18: 4 total, 3 up, 4 in 2026-03-10T13:05:57.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:57 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T13:05:58.020 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 13:05:57 vm00 sudo[63099]: ceph : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/sbin/smartctl -x --json=o /dev/vde 2026-03-10T13:05:58.020 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 13:05:57 vm00 sudo[63099]: pam_unix(sudo:session): session opened for user root by (uid=0) 2026-03-10T13:05:58.020 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 13:05:57 vm00 sudo[63099]: pam_unix(sudo:session): session closed for user root 2026-03-10T13:05:58.020 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 13:05:58 vm00 sudo[63178]: ceph : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/sbin/smartctl -x --json=o /dev/vdd 2026-03-10T13:05:58.362 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 13:05:58 vm00 sudo[63178]: pam_unix(sudo:session): session opened for user root by (uid=0) 2026-03-10T13:05:58.362 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 13:05:58 vm00 sudo[63178]: pam_unix(sudo:session): session closed for user root 2026-03-10T13:05:58.362 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 13:05:58 vm00 sudo[63230]: ceph : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/sbin/smartctl -x --json=o /dev/vdc 2026-03-10T13:05:58.362 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 13:05:58 vm00 sudo[63230]: pam_unix(sudo:session): session opened for user root by (uid=0) 2026-03-10T13:05:58.363 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 13:05:58 vm00 sudo[63230]: pam_unix(sudo:session): session closed for user root 2026-03-10T13:05:58.627 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:58 vm00 sudo[63312]: ceph : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/sbin/smartctl -x --json=o /dev/vda 2026-03-10T13:05:58.627 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:58 vm00 sudo[63312]: pam_unix(sudo:session): session opened for user root by (uid=0) 2026-03-10T13:05:58.627 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:58 vm00 sudo[63312]: pam_unix(sudo:session): session closed for user root 2026-03-10T13:05:59.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:58 vm00 ceph-mon[47364]: pgmap v40: 1 pgs: 1 unknown; 0 B data, 16 MiB used, 60 GiB / 60 GiB avail 2026-03-10T13:05:59.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:58 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:59.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:58 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:05:59.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:58 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:05:59.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:58 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:05:59.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:58 vm00 ceph-mon[47364]: from='mgr.14152 
192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T13:05:59.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:58 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T13:05:59.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:58 vm00 ceph-mon[47364]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-10T13:05:59.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:58 vm00 ceph-mon[47364]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-10T13:05:59.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:58 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T13:05:59.004 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:58 vm00 sudo[63391]: ceph : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/sbin/smartctl -x --json=o /dev/vda 2026-03-10T13:05:59.004 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:58 vm00 sudo[63391]: pam_unix(sudo:session): session opened for user root by (uid=0) 2026-03-10T13:05:59.004 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:58 vm00 sudo[63391]: pam_unix(sudo:session): session closed for user root 2026-03-10T13:05:59.004 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:58 vm00 ceph-mon[51670]: pgmap v40: 1 pgs: 1 unknown; 0 B data, 16 MiB used, 60 GiB / 60 GiB avail 2026-03-10T13:05:59.004 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:58 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:59.004 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:58 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:05:59.004 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:58 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:05:59.004 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:58 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:05:59.004 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:58 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T13:05:59.004 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:58 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T13:05:59.004 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:58 vm00 ceph-mon[51670]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-10T13:05:59.004 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:58 vm00 ceph-mon[51670]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-10T13:05:59.004 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:58 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T13:05:59.041 INFO:teuthology.orchestra.run.vm00.stdout:Created osd(s) 3 on host 'vm00' 2026-03-10T13:05:59.100 DEBUG:teuthology.orchestra.run.vm00:osd.3> sudo 
journalctl -f -n 0 -u ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@osd.3.service 2026-03-10T13:05:59.104 INFO:tasks.cephadm:Deploying osd.4 on vm08 with /dev/vde... 2026-03-10T13:05:59.104 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -- lvm zap /dev/vde 2026-03-10T13:05:59.265 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:59 vm08 sudo[51389]: ceph : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/sbin/smartctl -x --json=o /dev/vda 2026-03-10T13:05:59.265 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:59 vm08 sudo[51389]: pam_unix(sudo:session): session opened for user root by (uid=0) 2026-03-10T13:05:59.265 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:59 vm08 sudo[51389]: pam_unix(sudo:session): session closed for user root 2026-03-10T13:05:59.265 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:59 vm08 ceph-mon[49535]: pgmap v40: 1 pgs: 1 unknown; 0 B data, 16 MiB used, 60 GiB / 60 GiB avail 2026-03-10T13:05:59.265 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:59 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:59.265 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:59 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:05:59.265 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:59 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:05:59.265 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:59 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:05:59.265 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:59 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T13:05:59.265 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:59 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T13:05:59.265 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:59 vm08 ceph-mon[49535]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-10T13:05:59.265 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:59 vm08 ceph-mon[49535]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-10T13:05:59.266 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:59 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T13:05:59.627 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 13:05:59 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-3[62974]: 2026-03-10T13:05:59.465+0000 7f4d733373c0 -1 osd.3 0 log_to_monitors true 2026-03-10T13:05:59.741 INFO:teuthology.orchestra.run.vm08.stdout: 2026-03-10T13:05:59.754 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -- ceph orch daemon add osd vm08:/dev/vde 2026-03-10T13:05:59.941 
INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:59 vm08 ceph-mon[49535]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-10T13:05:59.941 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:59 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T13:05:59.941 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:59 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T13:05:59.941 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:59 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T13:05:59.941 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:59 vm08 ceph-mon[49535]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-10T13:05:59.941 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:59 vm08 ceph-mon[49535]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-10T13:05:59.941 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:59 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T13:05:59.941 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:59 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T13:05:59.941 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:59 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T13:05:59.941 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:59 vm08 ceph-mon[49535]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-10T13:05:59.941 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:59 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:59.941 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:59 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:05:59.941 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:59 vm08 ceph-mon[49535]: from='osd.3 [v2:192.168.123.100:6826/441174892,v1:192.168.123.100:6827/441174892]' entity='osd.3' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]: dispatch 2026-03-10T13:05:59.941 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:05:59 vm08 ceph-mon[49535]: mgrmap e15: y(active, since 66s), standbys: x 2026-03-10T13:06:00.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:59 vm00 ceph-mon[47364]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-10T13:06:00.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:59 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T13:06:00.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:59 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T13:06:00.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:59 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: 
dispatch 2026-03-10T13:06:00.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:59 vm00 ceph-mon[47364]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-10T13:06:00.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:59 vm00 ceph-mon[47364]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-10T13:06:00.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:59 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T13:06:00.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:59 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T13:06:00.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:59 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T13:06:00.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:59 vm00 ceph-mon[47364]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-10T13:06:00.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:59 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:00.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:59 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:00.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:59 vm00 ceph-mon[47364]: from='osd.3 [v2:192.168.123.100:6826/441174892,v1:192.168.123.100:6827/441174892]' entity='osd.3' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]: dispatch 2026-03-10T13:06:00.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:05:59 vm00 ceph-mon[47364]: mgrmap e15: y(active, since 66s), standbys: x 2026-03-10T13:06:00.004 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:59 vm00 ceph-mon[51670]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-10T13:06:00.004 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:59 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T13:06:00.004 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:59 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T13:06:00.004 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:59 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T13:06:00.004 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:59 vm00 ceph-mon[51670]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-10T13:06:00.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:59 vm00 ceph-mon[51670]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-10T13:06:00.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:59 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T13:06:00.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:59 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 
2026-03-10T13:06:00.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:59 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T13:06:00.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:59 vm00 ceph-mon[51670]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-10T13:06:00.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:59 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:00.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:59 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:00.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:59 vm00 ceph-mon[51670]: from='osd.3 [v2:192.168.123.100:6826/441174892,v1:192.168.123.100:6827/441174892]' entity='osd.3' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]: dispatch 2026-03-10T13:06:00.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:05:59 vm00 ceph-mon[51670]: mgrmap e15: y(active, since 66s), standbys: x 2026-03-10T13:06:01.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:01 vm00 ceph-mon[47364]: pgmap v41: 1 pgs: 1 active+clean; 449 KiB data, 16 MiB used, 60 GiB / 60 GiB avail 2026-03-10T13:06:01.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:01 vm00 ceph-mon[47364]: from='osd.3 [v2:192.168.123.100:6826/441174892,v1:192.168.123.100:6827/441174892]' entity='osd.3' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]': finished 2026-03-10T13:06:01.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:01 vm00 ceph-mon[47364]: osdmap e19: 4 total, 3 up, 4 in 2026-03-10T13:06:01.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:01 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T13:06:01.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:01 vm00 ceph-mon[47364]: from='osd.3 [v2:192.168.123.100:6826/441174892,v1:192.168.123.100:6827/441174892]' entity='osd.3' cmd=[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch 2026-03-10T13:06:01.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:01 vm00 ceph-mon[47364]: from='client.24155 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm08:/dev/vde", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:06:01.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:01 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T13:06:01.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:01 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-10T13:06:01.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:01 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:06:01.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:01 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:01.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:01 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' 
entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:06:01.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:01 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:01.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:01 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:06:01.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:01 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:06:01.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:01 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:06:01.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:01 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:01.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:01 vm00 ceph-mon[47364]: from='client.? 192.168.123.108:0/2292623221' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "2ec681f9-baf2-471e-8b59-1a1b47be1367"}]: dispatch 2026-03-10T13:06:01.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:01 vm00 ceph-mon[47364]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "2ec681f9-baf2-471e-8b59-1a1b47be1367"}]: dispatch 2026-03-10T13:06:01.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:01 vm00 ceph-mon[47364]: from='osd.3 [v2:192.168.123.100:6826/441174892,v1:192.168.123.100:6827/441174892]' entity='osd.3' cmd='[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm00", "root=default"]}]': finished 2026-03-10T13:06:01.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:01 vm00 ceph-mon[47364]: from='client.? 
' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "2ec681f9-baf2-471e-8b59-1a1b47be1367"}]': finished 2026-03-10T13:06:01.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:01 vm00 ceph-mon[47364]: osdmap e20: 5 total, 3 up, 5 in 2026-03-10T13:06:01.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:01 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T13:06:01.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:01 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T13:06:01.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:01 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T13:06:01.504 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 13:06:01 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-3[62974]: 2026-03-10T13:06:01.003+0000 7f4d6b53d700 -1 osd.3 0 waiting for initial osdmap 2026-03-10T13:06:01.504 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 13:06:01 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-3[62974]: 2026-03-10T13:06:01.017+0000 7f4d63ed0700 -1 osd.3 20 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-10T13:06:01.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:01 vm00 ceph-mon[51670]: pgmap v41: 1 pgs: 1 active+clean; 449 KiB data, 16 MiB used, 60 GiB / 60 GiB avail 2026-03-10T13:06:01.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:01 vm00 ceph-mon[51670]: from='osd.3 [v2:192.168.123.100:6826/441174892,v1:192.168.123.100:6827/441174892]' entity='osd.3' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]': finished 2026-03-10T13:06:01.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:01 vm00 ceph-mon[51670]: osdmap e19: 4 total, 3 up, 4 in 2026-03-10T13:06:01.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:01 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T13:06:01.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:01 vm00 ceph-mon[51670]: from='osd.3 [v2:192.168.123.100:6826/441174892,v1:192.168.123.100:6827/441174892]' entity='osd.3' cmd=[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch 2026-03-10T13:06:01.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:01 vm00 ceph-mon[51670]: from='client.24155 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm08:/dev/vde", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:06:01.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:01 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T13:06:01.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:01 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-10T13:06:01.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:01 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:06:01.504 
INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:01 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:01.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:01 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:06:01.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:01 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:01.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:01 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:06:01.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:01 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:06:01.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:01 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:06:01.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:01 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:01.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:01 vm00 ceph-mon[51670]: from='client.? 192.168.123.108:0/2292623221' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "2ec681f9-baf2-471e-8b59-1a1b47be1367"}]: dispatch 2026-03-10T13:06:01.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:01 vm00 ceph-mon[51670]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "2ec681f9-baf2-471e-8b59-1a1b47be1367"}]: dispatch 2026-03-10T13:06:01.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:01 vm00 ceph-mon[51670]: from='osd.3 [v2:192.168.123.100:6826/441174892,v1:192.168.123.100:6827/441174892]' entity='osd.3' cmd='[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm00", "root=default"]}]': finished 2026-03-10T13:06:01.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:01 vm00 ceph-mon[51670]: from='client.? 
' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "2ec681f9-baf2-471e-8b59-1a1b47be1367"}]': finished 2026-03-10T13:06:01.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:01 vm00 ceph-mon[51670]: osdmap e20: 5 total, 3 up, 5 in 2026-03-10T13:06:01.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:01 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T13:06:01.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:01 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T13:06:01.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:01 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T13:06:01.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:01 vm08 ceph-mon[49535]: pgmap v41: 1 pgs: 1 active+clean; 449 KiB data, 16 MiB used, 60 GiB / 60 GiB avail 2026-03-10T13:06:01.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:01 vm08 ceph-mon[49535]: from='osd.3 [v2:192.168.123.100:6826/441174892,v1:192.168.123.100:6827/441174892]' entity='osd.3' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]': finished 2026-03-10T13:06:01.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:01 vm08 ceph-mon[49535]: osdmap e19: 4 total, 3 up, 4 in 2026-03-10T13:06:01.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:01 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T13:06:01.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:01 vm08 ceph-mon[49535]: from='osd.3 [v2:192.168.123.100:6826/441174892,v1:192.168.123.100:6827/441174892]' entity='osd.3' cmd=[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch 2026-03-10T13:06:01.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:01 vm08 ceph-mon[49535]: from='client.24155 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm08:/dev/vde", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:06:01.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:01 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T13:06:01.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:01 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-10T13:06:01.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:01 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:06:01.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:01 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:01.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:01 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:06:01.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:01 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' 
entity='mgr.y' 2026-03-10T13:06:01.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:01 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:06:01.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:01 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:06:01.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:01 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:06:01.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:01 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:01.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:01 vm08 ceph-mon[49535]: from='client.? 192.168.123.108:0/2292623221' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "2ec681f9-baf2-471e-8b59-1a1b47be1367"}]: dispatch 2026-03-10T13:06:01.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:01 vm08 ceph-mon[49535]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "2ec681f9-baf2-471e-8b59-1a1b47be1367"}]: dispatch 2026-03-10T13:06:01.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:01 vm08 ceph-mon[49535]: from='osd.3 [v2:192.168.123.100:6826/441174892,v1:192.168.123.100:6827/441174892]' entity='osd.3' cmd='[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm00", "root=default"]}]': finished 2026-03-10T13:06:01.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:01 vm08 ceph-mon[49535]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "2ec681f9-baf2-471e-8b59-1a1b47be1367"}]': finished 2026-03-10T13:06:01.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:01 vm08 ceph-mon[49535]: osdmap e20: 5 total, 3 up, 5 in 2026-03-10T13:06:01.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:01 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T13:06:01.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:01 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T13:06:01.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:01 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T13:06:02.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:02 vm00 ceph-mon[47364]: Detected new or changed devices on vm00 2026-03-10T13:06:02.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:02 vm00 ceph-mon[47364]: from='client.? 
192.168.123.108:0/213140729' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T13:06:02.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:02 vm00 ceph-mon[47364]: osd.3 [v2:192.168.123.100:6826/441174892,v1:192.168.123.100:6827/441174892] boot 2026-03-10T13:06:02.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:02 vm00 ceph-mon[47364]: osdmap e21: 5 total, 4 up, 5 in 2026-03-10T13:06:02.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:02 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T13:06:02.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:02 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T13:06:02.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:02 vm00 ceph-mon[51670]: Detected new or changed devices on vm00 2026-03-10T13:06:02.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:02 vm00 ceph-mon[51670]: from='client.? 192.168.123.108:0/213140729' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T13:06:02.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:02 vm00 ceph-mon[51670]: osd.3 [v2:192.168.123.100:6826/441174892,v1:192.168.123.100:6827/441174892] boot 2026-03-10T13:06:02.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:02 vm00 ceph-mon[51670]: osdmap e21: 5 total, 4 up, 5 in 2026-03-10T13:06:02.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:02 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T13:06:02.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:02 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T13:06:02.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:02 vm08 ceph-mon[49535]: Detected new or changed devices on vm00 2026-03-10T13:06:02.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:02 vm08 ceph-mon[49535]: from='client.? 
192.168.123.108:0/213140729' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T13:06:02.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:02 vm08 ceph-mon[49535]: osd.3 [v2:192.168.123.100:6826/441174892,v1:192.168.123.100:6827/441174892] boot 2026-03-10T13:06:02.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:02 vm08 ceph-mon[49535]: osdmap e21: 5 total, 4 up, 5 in 2026-03-10T13:06:02.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:02 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T13:06:02.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:02 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T13:06:03.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:03 vm00 ceph-mon[47364]: purged_snaps scrub starts 2026-03-10T13:06:03.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:03 vm00 ceph-mon[47364]: purged_snaps scrub ok 2026-03-10T13:06:03.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:03 vm00 ceph-mon[47364]: pgmap v44: 1 pgs: 1 active+clean; 449 KiB data, 16 MiB used, 60 GiB / 60 GiB avail 2026-03-10T13:06:03.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:03 vm00 ceph-mon[51670]: purged_snaps scrub starts 2026-03-10T13:06:03.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:03 vm00 ceph-mon[51670]: purged_snaps scrub ok 2026-03-10T13:06:03.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:03 vm00 ceph-mon[51670]: pgmap v44: 1 pgs: 1 active+clean; 449 KiB data, 16 MiB used, 60 GiB / 60 GiB avail 2026-03-10T13:06:03.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:03 vm08 ceph-mon[49535]: purged_snaps scrub starts 2026-03-10T13:06:03.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:03 vm08 ceph-mon[49535]: purged_snaps scrub ok 2026-03-10T13:06:03.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:03 vm08 ceph-mon[49535]: pgmap v44: 1 pgs: 1 active+clean; 449 KiB data, 16 MiB used, 60 GiB / 60 GiB avail 2026-03-10T13:06:04.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:04 vm08 ceph-mon[49535]: osdmap e22: 5 total, 4 up, 5 in 2026-03-10T13:06:04.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:04 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T13:06:04.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:04 vm00 ceph-mon[47364]: osdmap e22: 5 total, 4 up, 5 in 2026-03-10T13:06:04.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:04 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T13:06:04.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:04 vm00 ceph-mon[51670]: osdmap e22: 5 total, 4 up, 5 in 2026-03-10T13:06:04.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:04 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T13:06:05.238 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:05 vm08 ceph-mon[49535]: pgmap v47: 1 pgs: 1 active+clean; 449 KiB data, 22 MiB used, 80 GiB / 80 GiB avail; 2.2 KiB/s rd, 65 KiB/s wr, 5 op/s 2026-03-10T13:06:05.238 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:05 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' 
entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.4"}]: dispatch 2026-03-10T13:06:05.238 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:05 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:06:05.238 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:05 vm08 ceph-mon[49535]: Deploying daemon osd.4 on vm08 2026-03-10T13:06:05.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:05 vm00 ceph-mon[47364]: pgmap v47: 1 pgs: 1 active+clean; 449 KiB data, 22 MiB used, 80 GiB / 80 GiB avail; 2.2 KiB/s rd, 65 KiB/s wr, 5 op/s 2026-03-10T13:06:05.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:05 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.4"}]: dispatch 2026-03-10T13:06:05.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:05 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:06:05.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:05 vm00 ceph-mon[47364]: Deploying daemon osd.4 on vm08 2026-03-10T13:06:05.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:05 vm00 ceph-mon[51670]: pgmap v47: 1 pgs: 1 active+clean; 449 KiB data, 22 MiB used, 80 GiB / 80 GiB avail; 2.2 KiB/s rd, 65 KiB/s wr, 5 op/s 2026-03-10T13:06:05.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:05 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.4"}]: dispatch 2026-03-10T13:06:05.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:05 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:06:05.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:05 vm00 ceph-mon[51670]: Deploying daemon osd.4 on vm08 2026-03-10T13:06:06.935 INFO:teuthology.orchestra.run.vm08.stdout:Created osd(s) 4 on host 'vm08' 2026-03-10T13:06:07.001 DEBUG:teuthology.orchestra.run.vm08:osd.4> sudo journalctl -f -n 0 -u ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@osd.4.service 2026-03-10T13:06:07.003 INFO:tasks.cephadm:Deploying osd.5 on vm08 with /dev/vdd... 
2026-03-10T13:06:07.003 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -- lvm zap /dev/vdd 2026-03-10T13:06:07.226 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:07 vm08 ceph-mon[49535]: pgmap v48: 1 pgs: 1 active+clean; 449 KiB data, 22 MiB used, 80 GiB / 80 GiB avail; 1.7 KiB/s rd, 48 KiB/s wr, 4 op/s 2026-03-10T13:06:07.226 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:07 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:07.226 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:07 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:06:07.226 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:07 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:06:07.226 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:07 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:06:07.226 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:07 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:07.226 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:07 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:07.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:07 vm00 ceph-mon[47364]: pgmap v48: 1 pgs: 1 active+clean; 449 KiB data, 22 MiB used, 80 GiB / 80 GiB avail; 1.7 KiB/s rd, 48 KiB/s wr, 4 op/s 2026-03-10T13:06:07.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:07 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:07.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:07 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:06:07.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:07 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:06:07.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:07 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:06:07.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:07 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:07.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:07 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:07.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:07 vm00 ceph-mon[51670]: pgmap v48: 1 pgs: 1 active+clean; 449 KiB data, 22 MiB used, 80 GiB / 80 GiB avail; 1.7 KiB/s rd, 48 KiB/s wr, 4 op/s 2026-03-10T13:06:07.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:07 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:07.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:07 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' 
entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:06:07.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:07 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:06:07.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:07 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:06:07.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:07 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:07.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:07 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:07.790 INFO:teuthology.orchestra.run.vm08.stdout: 2026-03-10T13:06:07.804 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -- ceph orch daemon add osd vm08:/dev/vdd 2026-03-10T13:06:08.663 INFO:journalctl@ceph.osd.4.vm08.stdout:Mar 10 13:06:08 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-4[53134]: 2026-03-10T13:06:08.418+0000 7fde1e5a63c0 -1 osd.4 0 log_to_monitors true 2026-03-10T13:06:09.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:09 vm00 ceph-mon[47364]: pgmap v49: 1 pgs: 1 active+clean; 449 KiB data, 23 MiB used, 80 GiB / 80 GiB avail; 1.4 KiB/s rd, 40 KiB/s wr, 3 op/s 2026-03-10T13:06:09.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:09 vm00 ceph-mon[47364]: from='client.24211 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm08:/dev/vdd", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:06:09.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:09 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T13:06:09.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:09 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-10T13:06:09.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:09 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:06:09.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:09 vm00 ceph-mon[47364]: from='osd.4 [v2:192.168.123.108:6800/1094633829,v1:192.168.123.108:6801/1094633829]' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-10T13:06:09.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:09 vm00 ceph-mon[47364]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-10T13:06:09.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:09 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:09.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:09 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:06:09.503 
INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:09 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:09.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:09 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:06:09.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:09 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:06:09.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:09 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:06:09.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:09 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:09.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:09 vm00 ceph-mon[51670]: pgmap v49: 1 pgs: 1 active+clean; 449 KiB data, 23 MiB used, 80 GiB / 80 GiB avail; 1.4 KiB/s rd, 40 KiB/s wr, 3 op/s 2026-03-10T13:06:09.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:09 vm00 ceph-mon[51670]: from='client.24211 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm08:/dev/vdd", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:06:09.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:09 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T13:06:09.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:09 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-10T13:06:09.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:09 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:06:09.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:09 vm00 ceph-mon[51670]: from='osd.4 [v2:192.168.123.108:6800/1094633829,v1:192.168.123.108:6801/1094633829]' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-10T13:06:09.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:09 vm00 ceph-mon[51670]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-10T13:06:09.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:09 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:09.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:09 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:06:09.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:09 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:09.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:09 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:06:09.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:09 vm00 ceph-mon[51670]: from='mgr.14152 
192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:06:09.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:09 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:06:09.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:09 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:09.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:09 vm08 ceph-mon[49535]: pgmap v49: 1 pgs: 1 active+clean; 449 KiB data, 23 MiB used, 80 GiB / 80 GiB avail; 1.4 KiB/s rd, 40 KiB/s wr, 3 op/s 2026-03-10T13:06:09.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:09 vm08 ceph-mon[49535]: from='client.24211 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm08:/dev/vdd", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:06:09.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:09 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T13:06:09.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:09 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-10T13:06:09.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:09 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:06:09.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:09 vm08 ceph-mon[49535]: from='osd.4 [v2:192.168.123.108:6800/1094633829,v1:192.168.123.108:6801/1094633829]' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-10T13:06:09.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:09 vm08 ceph-mon[49535]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-10T13:06:09.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:09 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:09.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:09 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:06:09.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:09 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:09.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:09 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:06:09.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:09 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:06:09.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:09 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:06:09.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:09 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' 
entity='mgr.y' 2026-03-10T13:06:09.522 INFO:journalctl@ceph.osd.4.vm08.stdout:Mar 10 13:06:09 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-4[53134]: 2026-03-10T13:06:09.259+0000 7fde14fa9700 -1 osd.4 0 waiting for initial osdmap 2026-03-10T13:06:09.522 INFO:journalctl@ceph.osd.4.vm08.stdout:Mar 10 13:06:09 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-4[53134]: 2026-03-10T13:06:09.273+0000 7fde0f940700 -1 osd.4 24 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-10T13:06:10.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:10 vm00 ceph-mon[47364]: Detected new or changed devices on vm08 2026-03-10T13:06:10.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:10 vm00 ceph-mon[47364]: Adjusting osd_memory_target on vm08 to 257.0M 2026-03-10T13:06:10.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:10 vm00 ceph-mon[47364]: Unable to set osd_memory_target on vm08 to 269530726: error parsing value: Value '269530726' is below minimum 939524096 2026-03-10T13:06:10.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:10 vm00 ceph-mon[47364]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]': finished 2026-03-10T13:06:10.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:10 vm00 ceph-mon[47364]: osdmap e23: 5 total, 4 up, 5 in 2026-03-10T13:06:10.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:10 vm00 ceph-mon[47364]: from='osd.4 [v2:192.168.123.108:6800/1094633829,v1:192.168.123.108:6801/1094633829]' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm08", "root=default"]}]: dispatch 2026-03-10T13:06:10.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:10 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T13:06:10.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:10 vm00 ceph-mon[47364]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm08", "root=default"]}]: dispatch 2026-03-10T13:06:10.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:10 vm00 ceph-mon[47364]: from='client.? 192.168.123.108:0/3113885332' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "4b07141b-58eb-441e-a2a5-b6422715a810"}]: dispatch 2026-03-10T13:06:10.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:10 vm00 ceph-mon[47364]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "4b07141b-58eb-441e-a2a5-b6422715a810"}]: dispatch 2026-03-10T13:06:10.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:10 vm00 ceph-mon[47364]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm08", "root=default"]}]': finished 2026-03-10T13:06:10.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:10 vm00 ceph-mon[47364]: from='client.? 
' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "4b07141b-58eb-441e-a2a5-b6422715a810"}]': finished 2026-03-10T13:06:10.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:10 vm00 ceph-mon[47364]: osdmap e24: 6 total, 4 up, 6 in 2026-03-10T13:06:10.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:10 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T13:06:10.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:10 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T13:06:10.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:10 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T13:06:10.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:10 vm00 ceph-mon[47364]: from='client.? 192.168.123.108:0/1235065091' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T13:06:10.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:10 vm00 ceph-mon[51670]: Detected new or changed devices on vm08 2026-03-10T13:06:10.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:10 vm00 ceph-mon[51670]: Adjusting osd_memory_target on vm08 to 257.0M 2026-03-10T13:06:10.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:10 vm00 ceph-mon[51670]: Unable to set osd_memory_target on vm08 to 269530726: error parsing value: Value '269530726' is below minimum 939524096 2026-03-10T13:06:10.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:10 vm00 ceph-mon[51670]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]': finished 2026-03-10T13:06:10.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:10 vm00 ceph-mon[51670]: osdmap e23: 5 total, 4 up, 5 in 2026-03-10T13:06:10.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:10 vm00 ceph-mon[51670]: from='osd.4 [v2:192.168.123.108:6800/1094633829,v1:192.168.123.108:6801/1094633829]' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm08", "root=default"]}]: dispatch 2026-03-10T13:06:10.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:10 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T13:06:10.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:10 vm00 ceph-mon[51670]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm08", "root=default"]}]: dispatch 2026-03-10T13:06:10.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:10 vm00 ceph-mon[51670]: from='client.? 192.168.123.108:0/3113885332' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "4b07141b-58eb-441e-a2a5-b6422715a810"}]: dispatch 2026-03-10T13:06:10.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:10 vm00 ceph-mon[51670]: from='client.? 
' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "4b07141b-58eb-441e-a2a5-b6422715a810"}]: dispatch 2026-03-10T13:06:10.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:10 vm00 ceph-mon[51670]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm08", "root=default"]}]': finished 2026-03-10T13:06:10.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:10 vm00 ceph-mon[51670]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "4b07141b-58eb-441e-a2a5-b6422715a810"}]': finished 2026-03-10T13:06:10.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:10 vm00 ceph-mon[51670]: osdmap e24: 6 total, 4 up, 6 in 2026-03-10T13:06:10.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:10 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T13:06:10.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:10 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T13:06:10.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:10 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T13:06:10.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:10 vm00 ceph-mon[51670]: from='client.? 192.168.123.108:0/1235065091' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T13:06:10.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:10 vm08 ceph-mon[49535]: Detected new or changed devices on vm08 2026-03-10T13:06:10.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:10 vm08 ceph-mon[49535]: Adjusting osd_memory_target on vm08 to 257.0M 2026-03-10T13:06:10.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:10 vm08 ceph-mon[49535]: Unable to set osd_memory_target on vm08 to 269530726: error parsing value: Value '269530726' is below minimum 939524096 2026-03-10T13:06:10.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:10 vm08 ceph-mon[49535]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]': finished 2026-03-10T13:06:10.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:10 vm08 ceph-mon[49535]: osdmap e23: 5 total, 4 up, 5 in 2026-03-10T13:06:10.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:10 vm08 ceph-mon[49535]: from='osd.4 [v2:192.168.123.108:6800/1094633829,v1:192.168.123.108:6801/1094633829]' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm08", "root=default"]}]: dispatch 2026-03-10T13:06:10.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:10 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T13:06:10.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:10 vm08 ceph-mon[49535]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm08", "root=default"]}]: dispatch 2026-03-10T13:06:10.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:10 vm08 ceph-mon[49535]: from='client.? 
192.168.123.108:0/3113885332' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "4b07141b-58eb-441e-a2a5-b6422715a810"}]: dispatch 2026-03-10T13:06:10.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:10 vm08 ceph-mon[49535]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "4b07141b-58eb-441e-a2a5-b6422715a810"}]: dispatch 2026-03-10T13:06:10.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:10 vm08 ceph-mon[49535]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm08", "root=default"]}]': finished 2026-03-10T13:06:10.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:10 vm08 ceph-mon[49535]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "4b07141b-58eb-441e-a2a5-b6422715a810"}]': finished 2026-03-10T13:06:10.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:10 vm08 ceph-mon[49535]: osdmap e24: 6 total, 4 up, 6 in 2026-03-10T13:06:10.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:10 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T13:06:10.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:10 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T13:06:10.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:10 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T13:06:10.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:10 vm08 ceph-mon[49535]: from='client.? 192.168.123.108:0/1235065091' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T13:06:11.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:11 vm00 ceph-mon[47364]: pgmap v52: 1 pgs: 1 active+clean; 449 KiB data, 23 MiB used, 80 GiB / 80 GiB avail 2026-03-10T13:06:11.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:11 vm00 ceph-mon[47364]: osd.4 [v2:192.168.123.108:6800/1094633829,v1:192.168.123.108:6801/1094633829] boot 2026-03-10T13:06:11.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:11 vm00 ceph-mon[47364]: osdmap e25: 6 total, 5 up, 6 in 2026-03-10T13:06:11.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:11 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T13:06:11.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:11 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T13:06:11.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:11 vm00 ceph-mon[51670]: pgmap v52: 1 pgs: 1 active+clean; 449 KiB data, 23 MiB used, 80 GiB / 80 GiB avail 2026-03-10T13:06:11.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:11 vm00 ceph-mon[51670]: osd.4 [v2:192.168.123.108:6800/1094633829,v1:192.168.123.108:6801/1094633829] boot 2026-03-10T13:06:11.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:11 vm00 ceph-mon[51670]: osdmap e25: 6 total, 5 up, 6 in 2026-03-10T13:06:11.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:11 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T13:06:11.754 
INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:11 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T13:06:11.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:11 vm08 ceph-mon[49535]: pgmap v52: 1 pgs: 1 active+clean; 449 KiB data, 23 MiB used, 80 GiB / 80 GiB avail 2026-03-10T13:06:11.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:11 vm08 ceph-mon[49535]: osd.4 [v2:192.168.123.108:6800/1094633829,v1:192.168.123.108:6801/1094633829] boot 2026-03-10T13:06:11.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:11 vm08 ceph-mon[49535]: osdmap e25: 6 total, 5 up, 6 in 2026-03-10T13:06:11.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:11 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T13:06:11.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:11 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T13:06:12.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:12 vm08 ceph-mon[49535]: purged_snaps scrub starts 2026-03-10T13:06:12.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:12 vm08 ceph-mon[49535]: purged_snaps scrub ok 2026-03-10T13:06:12.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:12 vm08 ceph-mon[49535]: osdmap e26: 6 total, 5 up, 6 in 2026-03-10T13:06:12.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:12 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T13:06:12.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:12 vm08 ceph-mon[49535]: osdmap e27: 6 total, 5 up, 6 in 2026-03-10T13:06:12.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:12 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T13:06:12.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:12 vm00 ceph-mon[47364]: purged_snaps scrub starts 2026-03-10T13:06:12.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:12 vm00 ceph-mon[47364]: purged_snaps scrub ok 2026-03-10T13:06:12.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:12 vm00 ceph-mon[47364]: osdmap e26: 6 total, 5 up, 6 in 2026-03-10T13:06:12.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:12 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T13:06:12.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:12 vm00 ceph-mon[47364]: osdmap e27: 6 total, 5 up, 6 in 2026-03-10T13:06:12.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:12 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T13:06:12.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:12 vm00 ceph-mon[51670]: purged_snaps scrub starts 2026-03-10T13:06:12.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:12 vm00 ceph-mon[51670]: purged_snaps scrub ok 2026-03-10T13:06:12.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:12 vm00 ceph-mon[51670]: osdmap e26: 6 total, 5 up, 6 in 2026-03-10T13:06:12.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:12 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 
cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T13:06:12.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:12 vm00 ceph-mon[51670]: osdmap e27: 6 total, 5 up, 6 in 2026-03-10T13:06:12.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:12 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T13:06:13.559 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:13 vm08 ceph-mon[49535]: pgmap v55: 1 pgs: 1 active+clean; 449 KiB data, 28 MiB used, 100 GiB / 100 GiB avail 2026-03-10T13:06:13.559 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:13 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.5"}]: dispatch 2026-03-10T13:06:13.559 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:13 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:06:13.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:13 vm00 ceph-mon[47364]: pgmap v55: 1 pgs: 1 active+clean; 449 KiB data, 28 MiB used, 100 GiB / 100 GiB avail 2026-03-10T13:06:13.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:13 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.5"}]: dispatch 2026-03-10T13:06:13.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:13 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:06:13.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:13 vm00 ceph-mon[51670]: pgmap v55: 1 pgs: 1 active+clean; 449 KiB data, 28 MiB used, 100 GiB / 100 GiB avail 2026-03-10T13:06:13.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:13 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.5"}]: dispatch 2026-03-10T13:06:13.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:13 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:06:14.421 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:14 vm08 ceph-mon[49535]: Deploying daemon osd.5 on vm08 2026-03-10T13:06:14.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:14 vm00 ceph-mon[47364]: Deploying daemon osd.5 on vm08 2026-03-10T13:06:14.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:14 vm00 ceph-mon[51670]: Deploying daemon osd.5 on vm08 2026-03-10T13:06:15.340 INFO:teuthology.orchestra.run.vm08.stdout:Created osd(s) 5 on host 'vm08' 2026-03-10T13:06:15.414 DEBUG:teuthology.orchestra.run.vm08:osd.5> sudo journalctl -f -n 0 -u ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@osd.5.service 2026-03-10T13:06:15.416 INFO:tasks.cephadm:Deploying osd.6 on vm08 with /dev/vdc... 
2026-03-10T13:06:15.416 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -- lvm zap /dev/vdc 2026-03-10T13:06:15.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:15 vm08 ceph-mon[49535]: pgmap v57: 1 pgs: 1 active+recovering+degraded; 449 KiB data, 28 MiB used, 100 GiB / 100 GiB avail; 2/6 objects misplaced (33.333%) 2026-03-10T13:06:15.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:15 vm08 ceph-mon[49535]: Health check failed: Degraded data redundancy: 1 pg degraded (PG_DEGRADED) 2026-03-10T13:06:15.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:15 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:15.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:15 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:06:15.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:15 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:06:15.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:15 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:06:15.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:15 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:15.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:15 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:15.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:15 vm00 ceph-mon[47364]: pgmap v57: 1 pgs: 1 active+recovering+degraded; 449 KiB data, 28 MiB used, 100 GiB / 100 GiB avail; 2/6 objects misplaced (33.333%) 2026-03-10T13:06:15.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:15 vm00 ceph-mon[47364]: Health check failed: Degraded data redundancy: 1 pg degraded (PG_DEGRADED) 2026-03-10T13:06:15.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:15 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:15.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:15 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:06:15.754 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:15 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:06:15.754 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:15 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:06:15.754 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:15 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:15.754 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:15 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:15.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:15 vm00 ceph-mon[51670]: pgmap v57: 1 pgs: 1 active+recovering+degraded; 449 
KiB data, 28 MiB used, 100 GiB / 100 GiB avail; 2/6 objects misplaced (33.333%) 2026-03-10T13:06:15.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:15 vm00 ceph-mon[51670]: Health check failed: Degraded data redundancy: 1 pg degraded (PG_DEGRADED) 2026-03-10T13:06:15.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:15 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:15.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:15 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:06:15.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:15 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:06:15.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:15 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:06:15.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:15 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:15.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:15 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:16.145 INFO:teuthology.orchestra.run.vm08.stdout: 2026-03-10T13:06:16.162 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -- ceph orch daemon add osd vm08:/dev/vdc 2026-03-10T13:06:16.248 INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 10 13:06:15 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-5[55920]: 2026-03-10T13:06:15.993+0000 7f1e43df53c0 -1 osd.5 0 log_to_monitors true 2026-03-10T13:06:16.501 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:16 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:16.501 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:16 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:06:16.501 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:16 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:06:16.501 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:16 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:06:16.501 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:16 vm08 ceph-mon[49535]: from='osd.5 [v2:192.168.123.108:6808/658387712,v1:192.168.123.108:6809/658387712]' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch 2026-03-10T13:06:16.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:16 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:16.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:16 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:06:16.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 
13:06:16 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:06:16.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:16 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:06:16.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:16 vm00 ceph-mon[47364]: from='osd.5 [v2:192.168.123.108:6808/658387712,v1:192.168.123.108:6809/658387712]' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch 2026-03-10T13:06:16.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:16 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:16.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:16 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:06:16.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:16 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:06:16.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:16 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:06:16.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:16 vm00 ceph-mon[51670]: from='osd.5 [v2:192.168.123.108:6808/658387712,v1:192.168.123.108:6809/658387712]' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch 2026-03-10T13:06:17.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:17 vm00 ceph-mon[47364]: pgmap v58: 1 pgs: 1 active+recovering+degraded; 449 KiB data, 28 MiB used, 100 GiB / 100 GiB avail; 2/6 objects misplaced (33.333%) 2026-03-10T13:06:17.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:17 vm00 ceph-mon[47364]: from='osd.5 [v2:192.168.123.108:6808/658387712,v1:192.168.123.108:6809/658387712]' entity='osd.5' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]': finished 2026-03-10T13:06:17.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:17 vm00 ceph-mon[47364]: osdmap e28: 6 total, 5 up, 6 in 2026-03-10T13:06:17.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:17 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T13:06:17.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:17 vm00 ceph-mon[47364]: from='osd.5 [v2:192.168.123.108:6808/658387712,v1:192.168.123.108:6809/658387712]' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm08", "root=default"]}]: dispatch 2026-03-10T13:06:17.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:17 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T13:06:17.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:17 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-10T13:06:17.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:17 vm00 ceph-mon[47364]: 
from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:06:17.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:17 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:17.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:17 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:06:17.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:17 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.5", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:06:17.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:17 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:17.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:17 vm00 ceph-mon[51670]: pgmap v58: 1 pgs: 1 active+recovering+degraded; 449 KiB data, 28 MiB used, 100 GiB / 100 GiB avail; 2/6 objects misplaced (33.333%) 2026-03-10T13:06:17.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:17 vm00 ceph-mon[51670]: from='osd.5 [v2:192.168.123.108:6808/658387712,v1:192.168.123.108:6809/658387712]' entity='osd.5' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]': finished 2026-03-10T13:06:17.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:17 vm00 ceph-mon[51670]: osdmap e28: 6 total, 5 up, 6 in 2026-03-10T13:06:17.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:17 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T13:06:17.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:17 vm00 ceph-mon[51670]: from='osd.5 [v2:192.168.123.108:6808/658387712,v1:192.168.123.108:6809/658387712]' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm08", "root=default"]}]: dispatch 2026-03-10T13:06:17.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:17 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T13:06:17.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:17 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-10T13:06:17.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:17 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:06:17.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:17 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:17.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:17 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:06:17.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:17 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.5", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:06:17.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 
13:06:17 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:17.772 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:17 vm08 ceph-mon[49535]: pgmap v58: 1 pgs: 1 active+recovering+degraded; 449 KiB data, 28 MiB used, 100 GiB / 100 GiB avail; 2/6 objects misplaced (33.333%) 2026-03-10T13:06:17.772 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:17 vm08 ceph-mon[49535]: from='osd.5 [v2:192.168.123.108:6808/658387712,v1:192.168.123.108:6809/658387712]' entity='osd.5' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]': finished 2026-03-10T13:06:17.772 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:17 vm08 ceph-mon[49535]: osdmap e28: 6 total, 5 up, 6 in 2026-03-10T13:06:17.772 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:17 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T13:06:17.772 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:17 vm08 ceph-mon[49535]: from='osd.5 [v2:192.168.123.108:6808/658387712,v1:192.168.123.108:6809/658387712]' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm08", "root=default"]}]: dispatch 2026-03-10T13:06:17.772 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:17 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T13:06:17.772 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:17 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-10T13:06:17.772 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:17 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:06:17.772 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:17 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:17.772 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:17 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:06:17.772 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:17 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.5", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:06:17.772 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:17 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:17.772 INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 10 13:06:17 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-5[55920]: 2026-03-10T13:06:17.353+0000 7f1e3bffb700 -1 osd.5 0 waiting for initial osdmap 2026-03-10T13:06:17.772 INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 10 13:06:17 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-5[55920]: 2026-03-10T13:06:17.367+0000 7f1e35990700 -1 osd.5 29 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-10T13:06:18.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:18 vm00 ceph-mon[47364]: from='client.24203 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm08:/dev/vdc", "target": ["mon-mgr", ""]}]: 
dispatch 2026-03-10T13:06:18.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:18 vm00 ceph-mon[47364]: Detected new or changed devices on vm08 2026-03-10T13:06:18.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:18 vm00 ceph-mon[47364]: Adjusting osd_memory_target on vm08 to 128.5M 2026-03-10T13:06:18.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:18 vm00 ceph-mon[47364]: Unable to set osd_memory_target on vm08 to 134765363: error parsing value: Value '134765363' is below minimum 939524096 2026-03-10T13:06:18.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:18 vm00 ceph-mon[47364]: from='osd.5 [v2:192.168.123.108:6808/658387712,v1:192.168.123.108:6809/658387712]' entity='osd.5' cmd='[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm08", "root=default"]}]': finished 2026-03-10T13:06:18.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:18 vm00 ceph-mon[47364]: osdmap e29: 6 total, 5 up, 6 in 2026-03-10T13:06:18.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:18 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T13:06:18.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:18 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T13:06:18.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:18 vm00 ceph-mon[47364]: osd.5 [v2:192.168.123.108:6808/658387712,v1:192.168.123.108:6809/658387712] boot 2026-03-10T13:06:18.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:18 vm00 ceph-mon[47364]: osdmap e30: 6 total, 6 up, 6 in 2026-03-10T13:06:18.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:18 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T13:06:18.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:18 vm00 ceph-mon[47364]: from='client.? 192.168.123.108:0/2878667567' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "8d39583c-2063-4d14-9842-a1a1a8782f74"}]: dispatch 2026-03-10T13:06:18.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:18 vm00 ceph-mon[47364]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "8d39583c-2063-4d14-9842-a1a1a8782f74"}]: dispatch 2026-03-10T13:06:18.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:18 vm00 ceph-mon[47364]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "8d39583c-2063-4d14-9842-a1a1a8782f74"}]': finished 2026-03-10T13:06:18.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:18 vm00 ceph-mon[47364]: osdmap e31: 7 total, 6 up, 7 in 2026-03-10T13:06:18.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:18 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T13:06:18.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:18 vm00 ceph-mon[47364]: from='client.? 
192.168.123.108:0/3811874705' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T13:06:18.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:18 vm00 ceph-mon[51670]: from='client.24203 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm08:/dev/vdc", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:06:18.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:18 vm00 ceph-mon[51670]: Detected new or changed devices on vm08 2026-03-10T13:06:18.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:18 vm00 ceph-mon[51670]: Adjusting osd_memory_target on vm08 to 128.5M 2026-03-10T13:06:18.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:18 vm00 ceph-mon[51670]: Unable to set osd_memory_target on vm08 to 134765363: error parsing value: Value '134765363' is below minimum 939524096 2026-03-10T13:06:18.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:18 vm00 ceph-mon[51670]: from='osd.5 [v2:192.168.123.108:6808/658387712,v1:192.168.123.108:6809/658387712]' entity='osd.5' cmd='[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm08", "root=default"]}]': finished 2026-03-10T13:06:18.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:18 vm00 ceph-mon[51670]: osdmap e29: 6 total, 5 up, 6 in 2026-03-10T13:06:18.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:18 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T13:06:18.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:18 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T13:06:18.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:18 vm00 ceph-mon[51670]: osd.5 [v2:192.168.123.108:6808/658387712,v1:192.168.123.108:6809/658387712] boot 2026-03-10T13:06:18.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:18 vm00 ceph-mon[51670]: osdmap e30: 6 total, 6 up, 6 in 2026-03-10T13:06:18.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:18 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T13:06:18.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:18 vm00 ceph-mon[51670]: from='client.? 192.168.123.108:0/2878667567' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "8d39583c-2063-4d14-9842-a1a1a8782f74"}]: dispatch 2026-03-10T13:06:18.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:18 vm00 ceph-mon[51670]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "8d39583c-2063-4d14-9842-a1a1a8782f74"}]: dispatch 2026-03-10T13:06:18.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:18 vm00 ceph-mon[51670]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "8d39583c-2063-4d14-9842-a1a1a8782f74"}]': finished 2026-03-10T13:06:18.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:18 vm00 ceph-mon[51670]: osdmap e31: 7 total, 6 up, 7 in 2026-03-10T13:06:18.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:18 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T13:06:18.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:18 vm00 ceph-mon[51670]: from='client.? 
192.168.123.108:0/3811874705' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T13:06:18.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:18 vm08 ceph-mon[49535]: from='client.24203 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm08:/dev/vdc", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:06:18.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:18 vm08 ceph-mon[49535]: Detected new or changed devices on vm08 2026-03-10T13:06:18.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:18 vm08 ceph-mon[49535]: Adjusting osd_memory_target on vm08 to 128.5M 2026-03-10T13:06:18.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:18 vm08 ceph-mon[49535]: Unable to set osd_memory_target on vm08 to 134765363: error parsing value: Value '134765363' is below minimum 939524096 2026-03-10T13:06:18.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:18 vm08 ceph-mon[49535]: from='osd.5 [v2:192.168.123.108:6808/658387712,v1:192.168.123.108:6809/658387712]' entity='osd.5' cmd='[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm08", "root=default"]}]': finished 2026-03-10T13:06:18.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:18 vm08 ceph-mon[49535]: osdmap e29: 6 total, 5 up, 6 in 2026-03-10T13:06:18.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:18 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T13:06:18.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:18 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T13:06:18.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:18 vm08 ceph-mon[49535]: osd.5 [v2:192.168.123.108:6808/658387712,v1:192.168.123.108:6809/658387712] boot 2026-03-10T13:06:18.772 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:18 vm08 ceph-mon[49535]: osdmap e30: 6 total, 6 up, 6 in 2026-03-10T13:06:18.772 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:18 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T13:06:18.772 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:18 vm08 ceph-mon[49535]: from='client.? 192.168.123.108:0/2878667567' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "8d39583c-2063-4d14-9842-a1a1a8782f74"}]: dispatch 2026-03-10T13:06:18.772 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:18 vm08 ceph-mon[49535]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "8d39583c-2063-4d14-9842-a1a1a8782f74"}]: dispatch 2026-03-10T13:06:18.772 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:18 vm08 ceph-mon[49535]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "8d39583c-2063-4d14-9842-a1a1a8782f74"}]': finished 2026-03-10T13:06:18.772 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:18 vm08 ceph-mon[49535]: osdmap e31: 7 total, 6 up, 7 in 2026-03-10T13:06:18.772 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:18 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T13:06:18.772 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:18 vm08 ceph-mon[49535]: from='client.? 
192.168.123.108:0/3811874705' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T13:06:19.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:19 vm00 ceph-mon[47364]: purged_snaps scrub starts 2026-03-10T13:06:19.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:19 vm00 ceph-mon[47364]: purged_snaps scrub ok 2026-03-10T13:06:19.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:19 vm00 ceph-mon[47364]: pgmap v61: 1 pgs: 1 active+recovering+degraded; 449 KiB data, 29 MiB used, 100 GiB / 100 GiB avail; 2/6 objects misplaced (33.333%) 2026-03-10T13:06:19.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:19 vm00 ceph-mon[47364]: osdmap e32: 7 total, 6 up, 7 in 2026-03-10T13:06:19.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:19 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T13:06:19.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:19 vm00 ceph-mon[51670]: purged_snaps scrub starts 2026-03-10T13:06:19.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:19 vm00 ceph-mon[51670]: purged_snaps scrub ok 2026-03-10T13:06:19.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:19 vm00 ceph-mon[51670]: pgmap v61: 1 pgs: 1 active+recovering+degraded; 449 KiB data, 29 MiB used, 100 GiB / 100 GiB avail; 2/6 objects misplaced (33.333%) 2026-03-10T13:06:19.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:19 vm00 ceph-mon[51670]: osdmap e32: 7 total, 6 up, 7 in 2026-03-10T13:06:19.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:19 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T13:06:19.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:19 vm08 ceph-mon[49535]: purged_snaps scrub starts 2026-03-10T13:06:19.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:19 vm08 ceph-mon[49535]: purged_snaps scrub ok 2026-03-10T13:06:19.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:19 vm08 ceph-mon[49535]: pgmap v61: 1 pgs: 1 active+recovering+degraded; 449 KiB data, 29 MiB used, 100 GiB / 100 GiB avail; 2/6 objects misplaced (33.333%) 2026-03-10T13:06:19.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:19 vm08 ceph-mon[49535]: osdmap e32: 7 total, 6 up, 7 in 2026-03-10T13:06:19.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:19 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T13:06:20.621 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:20 vm08 ceph-mon[49535]: pgmap v65: 1 pgs: 1 peering; 0 B data, 34 MiB used, 120 GiB / 120 GiB avail 2026-03-10T13:06:20.621 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:20 vm08 ceph-mon[49535]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 1 pg degraded) 2026-03-10T13:06:20.621 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:20 vm08 ceph-mon[49535]: Cluster is now healthy 2026-03-10T13:06:20.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:20 vm00 ceph-mon[47364]: pgmap v65: 1 pgs: 1 peering; 0 B data, 34 MiB used, 120 GiB / 120 GiB avail 2026-03-10T13:06:20.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:20 vm00 ceph-mon[47364]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 1 pg degraded) 2026-03-10T13:06:20.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:20 vm00 ceph-mon[47364]: Cluster is now 
healthy 2026-03-10T13:06:20.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:20 vm00 ceph-mon[51670]: pgmap v65: 1 pgs: 1 peering; 0 B data, 34 MiB used, 120 GiB / 120 GiB avail 2026-03-10T13:06:20.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:20 vm00 ceph-mon[51670]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 1 pg degraded) 2026-03-10T13:06:20.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:20 vm00 ceph-mon[51670]: Cluster is now healthy 2026-03-10T13:06:21.661 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:21 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.6"}]: dispatch 2026-03-10T13:06:21.661 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:21 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:06:21.661 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:21 vm08 ceph-mon[49535]: Deploying daemon osd.6 on vm08 2026-03-10T13:06:21.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:21 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.6"}]: dispatch 2026-03-10T13:06:21.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:21 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:06:21.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:21 vm00 ceph-mon[47364]: Deploying daemon osd.6 on vm08 2026-03-10T13:06:21.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:21 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.6"}]: dispatch 2026-03-10T13:06:21.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:21 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:06:21.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:21 vm00 ceph-mon[51670]: Deploying daemon osd.6 on vm08 2026-03-10T13:06:22.439 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:22 vm08 ceph-mon[49535]: pgmap v66: 1 pgs: 1 peering; 0 B data, 33 MiB used, 120 GiB / 120 GiB avail 2026-03-10T13:06:22.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:22 vm00 ceph-mon[47364]: pgmap v66: 1 pgs: 1 peering; 0 B data, 33 MiB used, 120 GiB / 120 GiB avail 2026-03-10T13:06:22.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:22 vm00 ceph-mon[51670]: pgmap v66: 1 pgs: 1 peering; 0 B data, 33 MiB used, 120 GiB / 120 GiB avail 2026-03-10T13:06:23.742 INFO:teuthology.orchestra.run.vm08.stdout:Created osd(s) 6 on host 'vm08' 2026-03-10T13:06:23.790 DEBUG:teuthology.orchestra.run.vm08:osd.6> sudo journalctl -f -n 0 -u ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@osd.6.service 2026-03-10T13:06:23.792 INFO:tasks.cephadm:Deploying osd.7 on vm08 with /dev/vdb... 
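Each additional OSD on vm08 follows the same two-step pattern the task logged above for /dev/vdd and /dev/vdc and is about to repeat for /dev/vdb: wipe the device with cephadm's bundled ceph-volume, then hand it to the orchestrator. A condensed sketch of that sequence, using the image, fsid, and keyring paths copied from the surrounding DEBUG lines (nothing new is introduced):

  # wipe any previous LVM state on the device
  sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 ceph-volume \
      -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring \
      --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -- lvm zap /dev/vdb
  # ask the orchestrator to create an OSD on that host/device
  sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell \
      -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring \
      --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -- ceph orch daemon add osd vm08:/dev/vdb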
2026-03-10T13:06:23.792 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -- lvm zap /dev/vdb 2026-03-10T13:06:24.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:23 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:24.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:23 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:06:24.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:23 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:06:24.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:23 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:06:24.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:23 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:24.023 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:23 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:24.023 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:23 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:24.023 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:23 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:06:24.023 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:23 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:06:24.023 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:23 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:06:24.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:23 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:24.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:23 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:06:24.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:23 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:06:24.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:23 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:06:24.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:23 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:24.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:23 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:24.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:23 vm00 ceph-mon[47364]: from='mgr.14152 
192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:24.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:23 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:06:24.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:23 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:06:24.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:23 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:06:24.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:23 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:24.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:23 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:06:24.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:23 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:06:24.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:23 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:06:24.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:23 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:24.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:23 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:24.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:23 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:24.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:23 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:06:24.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:23 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:06:24.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:23 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:06:24.552 INFO:teuthology.orchestra.run.vm08.stdout: 2026-03-10T13:06:24.566 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -- ceph orch daemon add osd vm08:/dev/vdb 2026-03-10T13:06:24.987 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:24 vm08 ceph-mon[49535]: pgmap v67: 1 pgs: 1 active+clean; 449 KiB data, 34 MiB used, 120 GiB / 120 GiB avail 2026-03-10T13:06:24.991 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:06:24 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-6[58662]: 2026-03-10T13:06:24.936+0000 7efe4ddc43c0 -1 osd.6 0 log_to_monitors true 2026-03-10T13:06:25.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:24 vm00 
ceph-mon[47364]: pgmap v67: 1 pgs: 1 active+clean; 449 KiB data, 34 MiB used, 120 GiB / 120 GiB avail 2026-03-10T13:06:25.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:24 vm00 ceph-mon[51670]: pgmap v67: 1 pgs: 1 active+clean; 449 KiB data, 34 MiB used, 120 GiB / 120 GiB avail 2026-03-10T13:06:25.836 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:25 vm08 ceph-mon[49535]: from='osd.6 [v2:192.168.123.108:6816/2473564301,v1:192.168.123.108:6817/2473564301]' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch 2026-03-10T13:06:25.836 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:25 vm08 ceph-mon[49535]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch 2026-03-10T13:06:25.836 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:25 vm08 ceph-mon[49535]: from='client.14349 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm08:/dev/vdb", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:06:25.836 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:25 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T13:06:25.836 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:25 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-10T13:06:25.836 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:25 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:06:25.836 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:25 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:25.836 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:25 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:06:25.836 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:25 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.5", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:06:25.836 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:25 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.6", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:06:25.836 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:25 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:26.097 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:06:26 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-6[58662]: 2026-03-10T13:06:26.076+0000 7efe447c7700 -1 osd.6 0 waiting for initial osdmap 2026-03-10T13:06:26.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:25 vm00 ceph-mon[47364]: from='osd.6 [v2:192.168.123.108:6816/2473564301,v1:192.168.123.108:6817/2473564301]' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch 2026-03-10T13:06:26.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:25 vm00 ceph-mon[47364]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch 
2026-03-10T13:06:26.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:25 vm00 ceph-mon[47364]: from='client.14349 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm08:/dev/vdb", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:06:26.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:25 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T13:06:26.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:25 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-10T13:06:26.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:25 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:06:26.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:25 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:26.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:25 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:06:26.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:25 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.5", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:06:26.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:25 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.6", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:06:26.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:25 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:26.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:25 vm00 ceph-mon[51670]: from='osd.6 [v2:192.168.123.108:6816/2473564301,v1:192.168.123.108:6817/2473564301]' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch 2026-03-10T13:06:26.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:25 vm00 ceph-mon[51670]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch 2026-03-10T13:06:26.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:25 vm00 ceph-mon[51670]: from='client.14349 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm08:/dev/vdb", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:06:26.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:25 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T13:06:26.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:25 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-10T13:06:26.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:25 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:06:26.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:25 vm00 
ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:26.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:25 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:06:26.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:25 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.5", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:06:26.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:25 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.6", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:06:26.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:25 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:26.521 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:06:26 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-6[58662]: 2026-03-10T13:06:26.093+0000 7efe41162700 -1 osd.6 34 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-10T13:06:27.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:26 vm00 ceph-mon[47364]: pgmap v68: 1 pgs: 1 active+clean; 449 KiB data, 34 MiB used, 120 GiB / 120 GiB avail 2026-03-10T13:06:27.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:26 vm00 ceph-mon[47364]: Detected new or changed devices on vm08 2026-03-10T13:06:27.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:26 vm00 ceph-mon[47364]: Adjusting osd_memory_target on vm08 to 87737k 2026-03-10T13:06:27.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:26 vm00 ceph-mon[47364]: Unable to set osd_memory_target on vm08 to 89843575: error parsing value: Value '89843575' is below minimum 939524096 2026-03-10T13:06:27.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:26 vm00 ceph-mon[47364]: from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]': finished 2026-03-10T13:06:27.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:26 vm00 ceph-mon[47364]: osdmap e33: 7 total, 6 up, 7 in 2026-03-10T13:06:27.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:26 vm00 ceph-mon[47364]: from='osd.6 [v2:192.168.123.108:6816/2473564301,v1:192.168.123.108:6817/2473564301]' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm08", "root=default"]}]: dispatch 2026-03-10T13:06:27.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:26 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T13:06:27.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:26 vm00 ceph-mon[47364]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm08", "root=default"]}]: dispatch 2026-03-10T13:06:27.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:26 vm00 ceph-mon[47364]: from='client.? 
192.168.123.108:0/946930533' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "e33bb491-3f0c-40cc-a5ae-770ea8457536"}]: dispatch 2026-03-10T13:06:27.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:26 vm00 ceph-mon[47364]: from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm08", "root=default"]}]': finished 2026-03-10T13:06:27.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:26 vm00 ceph-mon[47364]: from='client.? 192.168.123.108:0/946930533' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "e33bb491-3f0c-40cc-a5ae-770ea8457536"}]': finished 2026-03-10T13:06:27.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:26 vm00 ceph-mon[47364]: osdmap e34: 8 total, 6 up, 8 in 2026-03-10T13:06:27.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:26 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T13:06:27.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:26 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T13:06:27.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:26 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T13:06:27.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:26 vm00 ceph-mon[47364]: from='client.? 192.168.123.108:0/2178672236' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T13:06:27.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:26 vm00 ceph-mon[51670]: pgmap v68: 1 pgs: 1 active+clean; 449 KiB data, 34 MiB used, 120 GiB / 120 GiB avail 2026-03-10T13:06:27.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:26 vm00 ceph-mon[51670]: Detected new or changed devices on vm08 2026-03-10T13:06:27.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:26 vm00 ceph-mon[51670]: Adjusting osd_memory_target on vm08 to 87737k 2026-03-10T13:06:27.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:26 vm00 ceph-mon[51670]: Unable to set osd_memory_target on vm08 to 89843575: error parsing value: Value '89843575' is below minimum 939524096 2026-03-10T13:06:27.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:26 vm00 ceph-mon[51670]: from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]': finished 2026-03-10T13:06:27.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:26 vm00 ceph-mon[51670]: osdmap e33: 7 total, 6 up, 7 in 2026-03-10T13:06:27.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:26 vm00 ceph-mon[51670]: from='osd.6 [v2:192.168.123.108:6816/2473564301,v1:192.168.123.108:6817/2473564301]' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm08", "root=default"]}]: dispatch 2026-03-10T13:06:27.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:26 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T13:06:27.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:26 vm00 ceph-mon[51670]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm08", "root=default"]}]: dispatch 2026-03-10T13:06:27.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 
13:06:26 vm00 ceph-mon[51670]: from='client.? 192.168.123.108:0/946930533' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "e33bb491-3f0c-40cc-a5ae-770ea8457536"}]: dispatch 2026-03-10T13:06:27.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:26 vm00 ceph-mon[51670]: from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm08", "root=default"]}]': finished 2026-03-10T13:06:27.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:26 vm00 ceph-mon[51670]: from='client.? 192.168.123.108:0/946930533' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "e33bb491-3f0c-40cc-a5ae-770ea8457536"}]': finished 2026-03-10T13:06:27.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:26 vm00 ceph-mon[51670]: osdmap e34: 8 total, 6 up, 8 in 2026-03-10T13:06:27.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:26 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T13:06:27.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:26 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T13:06:27.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:26 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T13:06:27.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:26 vm00 ceph-mon[51670]: from='client.? 192.168.123.108:0/2178672236' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T13:06:27.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:26 vm08 ceph-mon[49535]: pgmap v68: 1 pgs: 1 active+clean; 449 KiB data, 34 MiB used, 120 GiB / 120 GiB avail 2026-03-10T13:06:27.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:26 vm08 ceph-mon[49535]: Detected new or changed devices on vm08 2026-03-10T13:06:27.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:26 vm08 ceph-mon[49535]: Adjusting osd_memory_target on vm08 to 87737k 2026-03-10T13:06:27.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:26 vm08 ceph-mon[49535]: Unable to set osd_memory_target on vm08 to 89843575: error parsing value: Value '89843575' is below minimum 939524096 2026-03-10T13:06:27.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:26 vm08 ceph-mon[49535]: from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]': finished 2026-03-10T13:06:27.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:26 vm08 ceph-mon[49535]: osdmap e33: 7 total, 6 up, 7 in 2026-03-10T13:06:27.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:26 vm08 ceph-mon[49535]: from='osd.6 [v2:192.168.123.108:6816/2473564301,v1:192.168.123.108:6817/2473564301]' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm08", "root=default"]}]: dispatch 2026-03-10T13:06:27.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:26 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T13:06:27.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:26 vm08 ceph-mon[49535]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm08", "root=default"]}]: dispatch 2026-03-10T13:06:27.271 
INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:26 vm08 ceph-mon[49535]: from='client.? 192.168.123.108:0/946930533' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "e33bb491-3f0c-40cc-a5ae-770ea8457536"}]: dispatch 2026-03-10T13:06:27.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:26 vm08 ceph-mon[49535]: from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm08", "root=default"]}]': finished 2026-03-10T13:06:27.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:26 vm08 ceph-mon[49535]: from='client.? 192.168.123.108:0/946930533' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "e33bb491-3f0c-40cc-a5ae-770ea8457536"}]': finished 2026-03-10T13:06:27.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:26 vm08 ceph-mon[49535]: osdmap e34: 8 total, 6 up, 8 in 2026-03-10T13:06:27.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:26 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T13:06:27.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:26 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T13:06:27.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:26 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T13:06:27.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:26 vm08 ceph-mon[49535]: from='client.? 192.168.123.108:0/2178672236' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T13:06:28.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:28 vm00 ceph-mon[47364]: purged_snaps scrub starts 2026-03-10T13:06:28.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:28 vm00 ceph-mon[47364]: purged_snaps scrub ok 2026-03-10T13:06:28.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:28 vm00 ceph-mon[47364]: osd.6 [v2:192.168.123.108:6816/2473564301,v1:192.168.123.108:6817/2473564301] boot 2026-03-10T13:06:28.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:28 vm00 ceph-mon[47364]: osdmap e35: 8 total, 7 up, 8 in 2026-03-10T13:06:28.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:28 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T13:06:28.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:28 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T13:06:28.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:28 vm00 ceph-mon[47364]: osdmap e36: 8 total, 7 up, 8 in 2026-03-10T13:06:28.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:28 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T13:06:28.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:28 vm00 ceph-mon[51670]: purged_snaps scrub starts 2026-03-10T13:06:28.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:28 vm00 ceph-mon[51670]: purged_snaps scrub ok 2026-03-10T13:06:28.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:28 vm00 ceph-mon[51670]: osd.6 [v2:192.168.123.108:6816/2473564301,v1:192.168.123.108:6817/2473564301] boot 2026-03-10T13:06:28.503 
INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:28 vm00 ceph-mon[51670]: osdmap e35: 8 total, 7 up, 8 in 2026-03-10T13:06:28.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:28 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T13:06:28.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:28 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T13:06:28.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:28 vm00 ceph-mon[51670]: osdmap e36: 8 total, 7 up, 8 in 2026-03-10T13:06:28.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:28 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T13:06:28.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:28 vm08 ceph-mon[49535]: purged_snaps scrub starts 2026-03-10T13:06:28.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:28 vm08 ceph-mon[49535]: purged_snaps scrub ok 2026-03-10T13:06:28.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:28 vm08 ceph-mon[49535]: osd.6 [v2:192.168.123.108:6816/2473564301,v1:192.168.123.108:6817/2473564301] boot 2026-03-10T13:06:28.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:28 vm08 ceph-mon[49535]: osdmap e35: 8 total, 7 up, 8 in 2026-03-10T13:06:28.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:28 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T13:06:28.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:28 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T13:06:28.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:28 vm08 ceph-mon[49535]: osdmap e36: 8 total, 7 up, 8 in 2026-03-10T13:06:28.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:28 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T13:06:29.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:29 vm00 ceph-mon[47364]: pgmap v72: 1 pgs: 1 peering; 449 KiB data, 40 MiB used, 140 GiB / 140 GiB avail 2026-03-10T13:06:29.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:29 vm00 ceph-mon[47364]: osdmap e37: 8 total, 7 up, 8 in 2026-03-10T13:06:29.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:29 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T13:06:29.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:29 vm00 ceph-mon[51670]: pgmap v72: 1 pgs: 1 peering; 449 KiB data, 40 MiB used, 140 GiB / 140 GiB avail 2026-03-10T13:06:29.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:29 vm00 ceph-mon[51670]: osdmap e37: 8 total, 7 up, 8 in 2026-03-10T13:06:29.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:29 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T13:06:29.508 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:29 vm08 ceph-mon[49535]: pgmap v72: 1 pgs: 1 peering; 449 KiB data, 40 MiB used, 140 GiB / 140 GiB avail 2026-03-10T13:06:29.508 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:29 vm08 
ceph-mon[49535]: osdmap e37: 8 total, 7 up, 8 in 2026-03-10T13:06:29.508 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:29 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T13:06:30.441 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:30 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.7"}]: dispatch 2026-03-10T13:06:30.441 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:30 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:06:30.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:30 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.7"}]: dispatch 2026-03-10T13:06:30.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:30 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:06:30.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:30 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.7"}]: dispatch 2026-03-10T13:06:30.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:30 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:06:31.341 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:31 vm08 ceph-mon[49535]: pgmap v75: 1 pgs: 1 peering; 449 KiB data, 40 MiB used, 140 GiB / 140 GiB avail 2026-03-10T13:06:31.341 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:31 vm08 ceph-mon[49535]: Deploying daemon osd.7 on vm08 2026-03-10T13:06:31.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:31 vm00 ceph-mon[47364]: pgmap v75: 1 pgs: 1 peering; 449 KiB data, 40 MiB used, 140 GiB / 140 GiB avail 2026-03-10T13:06:31.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:31 vm00 ceph-mon[47364]: Deploying daemon osd.7 on vm08 2026-03-10T13:06:31.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:31 vm00 ceph-mon[51670]: pgmap v75: 1 pgs: 1 peering; 449 KiB data, 40 MiB used, 140 GiB / 140 GiB avail 2026-03-10T13:06:31.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:31 vm00 ceph-mon[51670]: Deploying daemon osd.7 on vm08 2026-03-10T13:06:32.153 INFO:teuthology.orchestra.run.vm08.stdout:Created osd(s) 7 on host 'vm08' 2026-03-10T13:06:32.204 DEBUG:teuthology.orchestra.run.vm08:osd.7> sudo journalctl -f -n 0 -u ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@osd.7.service 2026-03-10T13:06:32.206 INFO:tasks.cephadm:Waiting for 8 OSDs to come up... 
2026-03-10T13:06:32.206 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -- ceph osd stat -f json 2026-03-10T13:06:32.442 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:32 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:32.442 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:32 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:06:32.442 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:32 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:06:32.442 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:32 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:06:32.442 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:32 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:32.442 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:32 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:32.442 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:32 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:32.442 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:32 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:06:32.442 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:32 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:06:32.442 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:32 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:06:32.442 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:32 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:32.442 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:32 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:06:32.442 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:32 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:06:32.442 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:32 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:06:32.442 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:32 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:32.442 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:32 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:32.442 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:32 vm00 ceph-mon[51670]: from='mgr.14152 
192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:32.442 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:32 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:06:32.442 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:32 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:06:32.443 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:32 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:06:32.542 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:32 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:32.542 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:32 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:06:32.542 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:32 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:06:32.542 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:32 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:06:32.542 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:32 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:32.542 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:32 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:32.542 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:32 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:32.542 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:32 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:06:32.542 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:32 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:06:32.542 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:32 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:06:32.705 INFO:teuthology.orchestra.run.vm00.stdout: 2026-03-10T13:06:32.774 INFO:teuthology.orchestra.run.vm00.stdout:{"epoch":37,"num_osds":8,"num_up_osds":7,"osd_up_since":1773147987,"num_in_osds":8,"osd_in_since":1773147986,"num_remapped_pgs":0} 2026-03-10T13:06:33.271 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:06:32 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7[61424]: 2026-03-10T13:06:32.890+0000 7fe4e4d7e3c0 -1 osd.7 0 log_to_monitors true 2026-03-10T13:06:33.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:33 vm00 ceph-mon[47364]: pgmap v76: 1 pgs: 1 peering; 449 KiB data, 40 MiB used, 140 GiB / 140 GiB avail 2026-03-10T13:06:33.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:33 vm00 ceph-mon[47364]: from='client.? 
192.168.123.100:0/1718069629' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T13:06:33.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:33 vm00 ceph-mon[47364]: from='osd.7 [v2:192.168.123.108:6824/19133272,v1:192.168.123.108:6825/19133272]' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch 2026-03-10T13:06:33.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:33 vm00 ceph-mon[47364]: from='osd.7 ' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch 2026-03-10T13:06:33.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:33 vm00 ceph-mon[51670]: pgmap v76: 1 pgs: 1 peering; 449 KiB data, 40 MiB used, 140 GiB / 140 GiB avail 2026-03-10T13:06:33.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:33 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/1718069629' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T13:06:33.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:33 vm00 ceph-mon[51670]: from='osd.7 [v2:192.168.123.108:6824/19133272,v1:192.168.123.108:6825/19133272]' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch 2026-03-10T13:06:33.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:33 vm00 ceph-mon[51670]: from='osd.7 ' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch 2026-03-10T13:06:33.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:33 vm08 ceph-mon[49535]: pgmap v76: 1 pgs: 1 peering; 449 KiB data, 40 MiB used, 140 GiB / 140 GiB avail 2026-03-10T13:06:33.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:33 vm08 ceph-mon[49535]: from='client.? 
192.168.123.100:0/1718069629' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T13:06:33.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:33 vm08 ceph-mon[49535]: from='osd.7 [v2:192.168.123.108:6824/19133272,v1:192.168.123.108:6825/19133272]' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch 2026-03-10T13:06:33.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:33 vm08 ceph-mon[49535]: from='osd.7 ' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch 2026-03-10T13:06:33.776 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -- ceph osd stat -f json 2026-03-10T13:06:34.257 INFO:teuthology.orchestra.run.vm00.stdout: 2026-03-10T13:06:34.305 INFO:teuthology.orchestra.run.vm00.stdout:{"epoch":38,"num_osds":8,"num_up_osds":7,"osd_up_since":1773147987,"num_in_osds":8,"osd_in_since":1773147986,"num_remapped_pgs":0} 2026-03-10T13:06:34.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:34 vm00 ceph-mon[47364]: Detected new or changed devices on vm08 2026-03-10T13:06:34.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:34 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:34.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:34 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:06:34.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:34 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.5", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:06:34.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:34 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.6", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:06:34.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:34 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.7", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:06:34.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:34 vm00 ceph-mon[47364]: Adjusting osd_memory_target on vm08 to 65803k 2026-03-10T13:06:34.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:34 vm00 ceph-mon[47364]: Unable to set osd_memory_target on vm08 to 67382681: error parsing value: Value '67382681' is below minimum 939524096 2026-03-10T13:06:34.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:34 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:34.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:34 vm00 ceph-mon[47364]: pgmap v77: 1 pgs: 1 active+clean; 449 KiB data, 40 MiB used, 140 GiB / 140 GiB avail; 60 KiB/s, 0 objects/s recovering 2026-03-10T13:06:34.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:34 vm00 ceph-mon[47364]: from='osd.7 ' entity='osd.7' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]': finished 2026-03-10T13:06:34.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:34 vm00 ceph-mon[47364]: osdmap e38: 8 total, 7 up, 8 in 
2026-03-10T13:06:34.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:34 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T13:06:34.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:34 vm00 ceph-mon[47364]: from='osd.7 [v2:192.168.123.108:6824/19133272,v1:192.168.123.108:6825/19133272]' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm08", "root=default"]}]: dispatch 2026-03-10T13:06:34.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:34 vm00 ceph-mon[47364]: from='osd.7 ' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm08", "root=default"]}]: dispatch 2026-03-10T13:06:34.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:34 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/159931565' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T13:06:34.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:34 vm00 ceph-mon[51670]: Detected new or changed devices on vm08 2026-03-10T13:06:34.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:34 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:34.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:34 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:06:34.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:34 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.5", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:06:34.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:34 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.6", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:06:34.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:34 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.7", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:06:34.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:34 vm00 ceph-mon[51670]: Adjusting osd_memory_target on vm08 to 65803k 2026-03-10T13:06:34.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:34 vm00 ceph-mon[51670]: Unable to set osd_memory_target on vm08 to 67382681: error parsing value: Value '67382681' is below minimum 939524096 2026-03-10T13:06:34.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:34 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:34.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:34 vm00 ceph-mon[51670]: pgmap v77: 1 pgs: 1 active+clean; 449 KiB data, 40 MiB used, 140 GiB / 140 GiB avail; 60 KiB/s, 0 objects/s recovering 2026-03-10T13:06:34.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:34 vm00 ceph-mon[51670]: from='osd.7 ' entity='osd.7' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]': finished 2026-03-10T13:06:34.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:34 vm00 ceph-mon[51670]: osdmap e38: 8 total, 7 up, 8 in 2026-03-10T13:06:34.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:34 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' 
entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T13:06:34.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:34 vm00 ceph-mon[51670]: from='osd.7 [v2:192.168.123.108:6824/19133272,v1:192.168.123.108:6825/19133272]' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm08", "root=default"]}]: dispatch 2026-03-10T13:06:34.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:34 vm00 ceph-mon[51670]: from='osd.7 ' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm08", "root=default"]}]: dispatch 2026-03-10T13:06:34.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:34 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/159931565' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T13:06:34.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:34 vm08 ceph-mon[49535]: Detected new or changed devices on vm08 2026-03-10T13:06:34.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:34 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:34.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:34 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:06:34.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:34 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.5", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:06:34.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:34 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.6", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:06:34.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:34 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.7", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:06:34.772 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:34 vm08 ceph-mon[49535]: Adjusting osd_memory_target on vm08 to 65803k 2026-03-10T13:06:34.772 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:34 vm08 ceph-mon[49535]: Unable to set osd_memory_target on vm08 to 67382681: error parsing value: Value '67382681' is below minimum 939524096 2026-03-10T13:06:34.772 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:34 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:34.772 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:34 vm08 ceph-mon[49535]: pgmap v77: 1 pgs: 1 active+clean; 449 KiB data, 40 MiB used, 140 GiB / 140 GiB avail; 60 KiB/s, 0 objects/s recovering 2026-03-10T13:06:34.772 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:34 vm08 ceph-mon[49535]: from='osd.7 ' entity='osd.7' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]': finished 2026-03-10T13:06:34.772 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:34 vm08 ceph-mon[49535]: osdmap e38: 8 total, 7 up, 8 in 2026-03-10T13:06:34.772 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:34 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T13:06:34.772 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:34 vm08 
ceph-mon[49535]: from='osd.7 [v2:192.168.123.108:6824/19133272,v1:192.168.123.108:6825/19133272]' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm08", "root=default"]}]: dispatch 2026-03-10T13:06:34.772 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:34 vm08 ceph-mon[49535]: from='osd.7 ' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm08", "root=default"]}]: dispatch 2026-03-10T13:06:34.772 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:34 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/159931565' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T13:06:34.772 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:06:34 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7[61424]: 2026-03-10T13:06:34.560+0000 7fe4db781700 -1 osd.7 0 waiting for initial osdmap 2026-03-10T13:06:34.772 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:06:34 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7[61424]: 2026-03-10T13:06:34.569+0000 7fe4d811c700 -1 osd.7 39 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-10T13:06:35.306 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -- ceph osd stat -f json 2026-03-10T13:06:35.581 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:35 vm00 ceph-mon[47364]: from='osd.7 ' entity='osd.7' cmd='[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm08", "root=default"]}]': finished 2026-03-10T13:06:35.581 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:35 vm00 ceph-mon[47364]: osdmap e39: 8 total, 7 up, 8 in 2026-03-10T13:06:35.581 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:35 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T13:06:35.581 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:35 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T13:06:35.583 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:35 vm00 ceph-mon[51670]: from='osd.7 ' entity='osd.7' cmd='[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm08", "root=default"]}]': finished 2026-03-10T13:06:35.583 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:35 vm00 ceph-mon[51670]: osdmap e39: 8 total, 7 up, 8 in 2026-03-10T13:06:35.583 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:35 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T13:06:35.583 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:35 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T13:06:35.762 INFO:teuthology.orchestra.run.vm00.stdout: 2026-03-10T13:06:35.809 INFO:teuthology.orchestra.run.vm00.stdout:{"epoch":40,"num_osds":8,"num_up_osds":8,"osd_up_since":1773147995,"num_in_osds":8,"osd_in_since":1773147986,"num_remapped_pgs":1} 2026-03-10T13:06:35.810 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 
98a3dada-1c81-11f1-89c9-d57c120f78d5 -- ceph osd dump --format=json 2026-03-10T13:06:35.955 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/mon.a/config 2026-03-10T13:06:36.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:35 vm08 ceph-mon[49535]: from='osd.7 ' entity='osd.7' cmd='[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm08", "root=default"]}]': finished 2026-03-10T13:06:36.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:35 vm08 ceph-mon[49535]: osdmap e39: 8 total, 7 up, 8 in 2026-03-10T13:06:36.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:35 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T13:06:36.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:35 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T13:06:36.311 INFO:teuthology.orchestra.run.vm00.stdout: 2026-03-10T13:06:36.312 INFO:teuthology.orchestra.run.vm00.stdout:{"epoch":40,"fsid":"98a3dada-1c81-11f1-89c9-d57c120f78d5","created":"2026-03-10T13:04:26.320123+0000","modified":"2026-03-10T13:06:35.555795+0000","last_up_change":"2026-03-10T13:06:35.555795+0000","last_in_change":"2026-03-10T13:06:26.067004+0000","flags":"sortbitwise,recovery_deletes,purged_snapdirs,pglog_hardlimit","flags_num":5799936,"flags_set":["pglog_hardlimit","purged_snapdirs","recovery_deletes","sortbitwise"],"crush_version":18,"full_ratio":0.94999998807907104,"backfillfull_ratio":0.89999997615814209,"nearfull_ratio":0.85000002384185791,"cluster_snapshot":"","pool_max":1,"max_osd":8,"require_min_compat_client":"luminous","min_compat_client":"jewel","require_osd_release":"quincy","pools":[{"pool":1,"pool_name":".mgr","create_time":"2026-03-10T13:05:53.703811+0000","flags":1,"flags_names":"hashpspool","type":1,"size":3,"min_size":2,"crush_rule":0,"peering_crush_bucket_count":0,"peering_crush_bucket_target":0,"peering_crush_bucket_barrier":0,"peering_crush_bucket_mandatory_member":2147483647,"object_hash":2,"pg_autoscale_mode":"off","pg_num":1,"pg_placement_num":1,"pg_placement_num_target":1,"pg_num_target":1,"pg_num_pending":1,"last_pg_merge_meta":{"source_pgid":"0.0","ready_epoch":0,"last_epoch_started":0,"last_epoch_clean":0,"source_version":"0'0","target_version":"0'0"},"last_change":"18","last_force_op_resend":"0","last_force_op_resend_prenautilus":"0","last_force_op_resend_preluminous":"0","auid":0,"snap_mode":"selfmanaged","snap_seq":0,"snap_epoch":0,"pool_snaps":[],"removed_snaps":"[]","quota_max_bytes":0,"quota_max_objects":0,"tiers":[],"tier_of":-1,"read_tier":-1,"write_tier":-1,"cache_mode":"none","target_max_bytes":0,"target_max_objects":0,"cache_target_dirty_ratio_micro":400000,"cache_target_dirty_high_ratio_micro":600000,"cache_target_full_ratio_micro":800000,"cache_min_flush_age":0,"cache_min_evict_age":0,"erasure_code_profile":"","hit_set_params":{"type":"none"},"hit_set_period":0,"hit_set_count":0,"use_gmt_hitset":true,"min_read_recency_for_promote":0,"min_write_recency_for_promote":0,"hit_set_grade_decay_rate":0,"hit_set_search_last_n":0,"grade_table":[],"stripe_width":0,"expected_num_objects":0,"fast_read":false,"options":{"pg_num_max":32,"pg_num_min":1},"application_metadata":{"mgr":{}}}],"osds":[{"osd":0,"uuid":"33741dbc-5269-4c43-97b4-ac057d7a2041","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_cle
an_end":0,"up_from":8,"up_thru":39,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6802","nonce":771461294},{"type":"v1","addr":"192.168.123.100:6803","nonce":771461294}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6804","nonce":771461294},{"type":"v1","addr":"192.168.123.100:6805","nonce":771461294}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6808","nonce":771461294},{"type":"v1","addr":"192.168.123.100:6809","nonce":771461294}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6806","nonce":771461294},{"type":"v1","addr":"192.168.123.100:6807","nonce":771461294}]},"public_addr":"192.168.123.100:6803/771461294","cluster_addr":"192.168.123.100:6805/771461294","heartbeat_back_addr":"192.168.123.100:6809/771461294","heartbeat_front_addr":"192.168.123.100:6807/771461294","state":["exists","up"]},{"osd":1,"uuid":"31dc7b09-f48f-4ec2-8ad6-69f3b68a5138","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":11,"up_thru":26,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6810","nonce":3864481161},{"type":"v1","addr":"192.168.123.100:6811","nonce":3864481161}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6812","nonce":3864481161},{"type":"v1","addr":"192.168.123.100:6813","nonce":3864481161}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6816","nonce":3864481161},{"type":"v1","addr":"192.168.123.100:6817","nonce":3864481161}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6814","nonce":3864481161},{"type":"v1","addr":"192.168.123.100:6815","nonce":3864481161}]},"public_addr":"192.168.123.100:6811/3864481161","cluster_addr":"192.168.123.100:6813/3864481161","heartbeat_back_addr":"192.168.123.100:6817/3864481161","heartbeat_front_addr":"192.168.123.100:6815/3864481161","state":["exists","up"]},{"osd":2,"uuid":"f9f7ad09-367f-410b-9921-f31c456c313d","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":15,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6818","nonce":4074751068},{"type":"v1","addr":"192.168.123.100:6819","nonce":4074751068}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6820","nonce":4074751068},{"type":"v1","addr":"192.168.123.100:6821","nonce":4074751068}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6824","nonce":4074751068},{"type":"v1","addr":"192.168.123.100:6825","nonce":4074751068}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6822","nonce":4074751068},{"type":"v1","addr":"192.168.123.100:6823","nonce":4074751068}]},"public_addr":"192.168.123.100:6819/4074751068","cluster_addr":"192.168.123.100:6821/4074751068","heartbeat_back_addr":"192.168.123.100:6825/4074751068","heartbeat_front_addr":"192.168.123.100:6823/4074751068","state":["exists","up"]},{"osd":3,"uuid":"36dd1fdb-2d5f-4be6-b549-9bcc7e503439","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":21,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6826","nonce":441174892},{"type":"v1","addr":"192.168.123.100:6827","nonce":441174892}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6828","nonce":441174892},{"type":"v1","addr":"192.168.123.100:6829","nonce":441174892}]},"heartbeat_
back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6832","nonce":441174892},{"type":"v1","addr":"192.168.123.100:6833","nonce":441174892}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6830","nonce":441174892},{"type":"v1","addr":"192.168.123.100:6831","nonce":441174892}]},"public_addr":"192.168.123.100:6827/441174892","cluster_addr":"192.168.123.100:6829/441174892","heartbeat_back_addr":"192.168.123.100:6833/441174892","heartbeat_front_addr":"192.168.123.100:6831/441174892","state":["exists","up"]},{"osd":4,"uuid":"2ec681f9-baf2-471e-8b59-1a1b47be1367","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":25,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6800","nonce":1094633829},{"type":"v1","addr":"192.168.123.108:6801","nonce":1094633829}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6802","nonce":1094633829},{"type":"v1","addr":"192.168.123.108:6803","nonce":1094633829}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6806","nonce":1094633829},{"type":"v1","addr":"192.168.123.108:6807","nonce":1094633829}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6804","nonce":1094633829},{"type":"v1","addr":"192.168.123.108:6805","nonce":1094633829}]},"public_addr":"192.168.123.108:6801/1094633829","cluster_addr":"192.168.123.108:6803/1094633829","heartbeat_back_addr":"192.168.123.108:6807/1094633829","heartbeat_front_addr":"192.168.123.108:6805/1094633829","state":["exists","up"]},{"osd":5,"uuid":"4b07141b-58eb-441e-a2a5-b6422715a810","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":30,"up_thru":31,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6808","nonce":658387712},{"type":"v1","addr":"192.168.123.108:6809","nonce":658387712}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6810","nonce":658387712},{"type":"v1","addr":"192.168.123.108:6811","nonce":658387712}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6814","nonce":658387712},{"type":"v1","addr":"192.168.123.108:6815","nonce":658387712}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6812","nonce":658387712},{"type":"v1","addr":"192.168.123.108:6813","nonce":658387712}]},"public_addr":"192.168.123.108:6809/658387712","cluster_addr":"192.168.123.108:6811/658387712","heartbeat_back_addr":"192.168.123.108:6815/658387712","heartbeat_front_addr":"192.168.123.108:6813/658387712","state":["exists","up"]},{"osd":6,"uuid":"8d39583c-2063-4d14-9842-a1a1a8782f74","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":35,"up_thru":36,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6816","nonce":2473564301},{"type":"v1","addr":"192.168.123.108:6817","nonce":2473564301}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6818","nonce":2473564301},{"type":"v1","addr":"192.168.123.108:6819","nonce":2473564301}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6822","nonce":2473564301},{"type":"v1","addr":"192.168.123.108:6823","nonce":2473564301}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6820","nonce":2473564301},{"type":"v1","addr":"192.168.123.108:6821","nonce":2473564301}]},"public_addr":"192.168.123.108:6817/2473564301","cluster_addr":"192.
168.123.108:6819/2473564301","heartbeat_back_addr":"192.168.123.108:6823/2473564301","heartbeat_front_addr":"192.168.123.108:6821/2473564301","state":["exists","up"]},{"osd":7,"uuid":"e33bb491-3f0c-40cc-a5ae-770ea8457536","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":40,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6824","nonce":19133272},{"type":"v1","addr":"192.168.123.108:6825","nonce":19133272}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6826","nonce":19133272},{"type":"v1","addr":"192.168.123.108:6827","nonce":19133272}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6830","nonce":19133272},{"type":"v1","addr":"192.168.123.108:6831","nonce":19133272}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6828","nonce":19133272},{"type":"v1","addr":"192.168.123.108:6829","nonce":19133272}]},"public_addr":"192.168.123.108:6825/19133272","cluster_addr":"192.168.123.108:6827/19133272","heartbeat_back_addr":"192.168.123.108:6831/19133272","heartbeat_front_addr":"192.168.123.108:6829/19133272","state":["exists","up"]}],"osd_xinfo":[{"osd":0,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T13:05:29.519117+0000","dead_epoch":0},{"osd":1,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T13:05:40.222041+0000","dead_epoch":0},{"osd":2,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T13:05:50.560236+0000","dead_epoch":0},{"osd":3,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T13:06:00.424824+0000","dead_epoch":0},{"osd":4,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T13:06:09.428649+0000","dead_epoch":0},{"osd":5,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T13:06:16.992877+0000","dead_epoch":0},{"osd":6,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T13:06:25.955750+0000","dead_epoch":0},{"osd":7,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"0.000000","dead_epoch":0}],"pg_upmap":[],"pg_upmap_items":[],"pg_temp":[{"pgid":"1.0","osds":[0,6,1]}],"primary_temp":[],"blocklist":{"192.168.123.100:6801/2653325477":"2026-03-11T13:04:52.784340+0000","192.168.123.100:6800/2653325477":"2026-03-11T13:04:52.784340+0000","192.168.123.100:0/4037273490":"2026-03-11T13:04:52.784340+0000","192.168.123.100:0/4178613541":"2026-03-11T13:04:52.784340+0000","192.168.123.100:0/2030610713":"2026-03-11T13:04:52.784340+0000","192.168.123.100:0/1209684253":"2026-03-11T13:04:41.791091+0000","192.168.123.100:6801/6955224":"2026-03-11T13:04:41.791091+0000","192.168.123.100:0/2363309494":"2026-03-11T13:04:41.791091+0000","192.168.123.100:6800/6955224":"2026-03-11T13:04:41.791091+0000","192.168.123.100:0/4024415478":"2026-03-11T13:04:41.791091+0000"},"erasure_code_profiles":{
"default":{"crush-failure-domain":"osd","k":"2","m":"1","plugin":"jerasure","technique":"reed_sol_van"}},"removed_snaps_queue":[],"new_removed_snaps":[],"new_purged_snaps":[],"crush_node_flags":{},"device_class_flags":{},"stretch_mode":{"stretch_mode_enabled":false,"stretch_bucket_count":0,"degraded_stretch_mode":0,"recovering_stretch_mode":0,"stretch_mode_bucket":0}} 2026-03-10T13:06:36.377 INFO:tasks.cephadm.ceph_manager.ceph:[{'pool': 1, 'pool_name': '.mgr', 'create_time': '2026-03-10T13:05:53.703811+0000', 'flags': 1, 'flags_names': 'hashpspool', 'type': 1, 'size': 3, 'min_size': 2, 'crush_rule': 0, 'peering_crush_bucket_count': 0, 'peering_crush_bucket_target': 0, 'peering_crush_bucket_barrier': 0, 'peering_crush_bucket_mandatory_member': 2147483647, 'object_hash': 2, 'pg_autoscale_mode': 'off', 'pg_num': 1, 'pg_placement_num': 1, 'pg_placement_num_target': 1, 'pg_num_target': 1, 'pg_num_pending': 1, 'last_pg_merge_meta': {'source_pgid': '0.0', 'ready_epoch': 0, 'last_epoch_started': 0, 'last_epoch_clean': 0, 'source_version': "0'0", 'target_version': "0'0"}, 'last_change': '18', 'last_force_op_resend': '0', 'last_force_op_resend_prenautilus': '0', 'last_force_op_resend_preluminous': '0', 'auid': 0, 'snap_mode': 'selfmanaged', 'snap_seq': 0, 'snap_epoch': 0, 'pool_snaps': [], 'removed_snaps': '[]', 'quota_max_bytes': 0, 'quota_max_objects': 0, 'tiers': [], 'tier_of': -1, 'read_tier': -1, 'write_tier': -1, 'cache_mode': 'none', 'target_max_bytes': 0, 'target_max_objects': 0, 'cache_target_dirty_ratio_micro': 400000, 'cache_target_dirty_high_ratio_micro': 600000, 'cache_target_full_ratio_micro': 800000, 'cache_min_flush_age': 0, 'cache_min_evict_age': 0, 'erasure_code_profile': '', 'hit_set_params': {'type': 'none'}, 'hit_set_period': 0, 'hit_set_count': 0, 'use_gmt_hitset': True, 'min_read_recency_for_promote': 0, 'min_write_recency_for_promote': 0, 'hit_set_grade_decay_rate': 0, 'hit_set_search_last_n': 0, 'grade_table': [], 'stripe_width': 0, 'expected_num_objects': 0, 'fast_read': False, 'options': {'pg_num_max': 32, 'pg_num_min': 1}, 'application_metadata': {'mgr': {}}}] 2026-03-10T13:06:36.377 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -- ceph osd pool get .mgr pg_num 2026-03-10T13:06:36.541 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/mon.a/config 2026-03-10T13:06:36.607 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:36 vm00 ceph-mon[47364]: purged_snaps scrub starts 2026-03-10T13:06:36.607 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:36 vm00 ceph-mon[47364]: purged_snaps scrub ok 2026-03-10T13:06:36.607 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:36 vm00 ceph-mon[47364]: pgmap v80: 1 pgs: 1 active+clean; 449 KiB data, 40 MiB used, 140 GiB / 140 GiB avail; 56 KiB/s, 0 objects/s recovering 2026-03-10T13:06:36.607 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:36 vm00 ceph-mon[47364]: osd.7 [v2:192.168.123.108:6824/19133272,v1:192.168.123.108:6825/19133272] boot 2026-03-10T13:06:36.607 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:36 vm00 ceph-mon[47364]: osdmap e40: 8 total, 8 up, 8 in 2026-03-10T13:06:36.607 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:36 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T13:06:36.607 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:36 
vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/1473868113' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T13:06:36.607 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:36 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/2788031216' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-10T13:06:36.608 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:36 vm00 ceph-mon[51670]: purged_snaps scrub starts 2026-03-10T13:06:36.608 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:36 vm00 ceph-mon[51670]: purged_snaps scrub ok 2026-03-10T13:06:36.608 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:36 vm00 ceph-mon[51670]: pgmap v80: 1 pgs: 1 active+clean; 449 KiB data, 40 MiB used, 140 GiB / 140 GiB avail; 56 KiB/s, 0 objects/s recovering 2026-03-10T13:06:36.608 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:36 vm00 ceph-mon[51670]: osd.7 [v2:192.168.123.108:6824/19133272,v1:192.168.123.108:6825/19133272] boot 2026-03-10T13:06:36.608 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:36 vm00 ceph-mon[51670]: osdmap e40: 8 total, 8 up, 8 in 2026-03-10T13:06:36.608 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:36 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T13:06:36.608 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:36 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/1473868113' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T13:06:36.608 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:36 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/2788031216' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-10T13:06:36.883 INFO:teuthology.orchestra.run.vm00.stdout:pg_num: 1 2026-03-10T13:06:36.931 INFO:tasks.cephadm:Adding prometheus.a on vm08 2026-03-10T13:06:36.932 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -- ceph orch apply prometheus '1;vm08=a' 2026-03-10T13:06:36.958 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:36 vm08 ceph-mon[49535]: purged_snaps scrub starts 2026-03-10T13:06:36.958 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:36 vm08 ceph-mon[49535]: purged_snaps scrub ok 2026-03-10T13:06:36.958 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:36 vm08 ceph-mon[49535]: pgmap v80: 1 pgs: 1 active+clean; 449 KiB data, 40 MiB used, 140 GiB / 140 GiB avail; 56 KiB/s, 0 objects/s recovering 2026-03-10T13:06:36.958 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:36 vm08 ceph-mon[49535]: osd.7 [v2:192.168.123.108:6824/19133272,v1:192.168.123.108:6825/19133272] boot 2026-03-10T13:06:36.958 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:36 vm08 ceph-mon[49535]: osdmap e40: 8 total, 8 up, 8 in 2026-03-10T13:06:36.958 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:36 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T13:06:36.958 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:36 vm08 ceph-mon[49535]: from='client.? 
192.168.123.100:0/1473868113' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T13:06:36.958 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:36 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/2788031216' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-10T13:06:37.423 INFO:teuthology.orchestra.run.vm08.stdout:Scheduled prometheus update... 2026-03-10T13:06:37.467 DEBUG:teuthology.orchestra.run.vm08:prometheus.a> sudo journalctl -f -n 0 -u ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@prometheus.a.service 2026-03-10T13:06:37.510 INFO:tasks.cephadm:Adding node-exporter.a on vm00 2026-03-10T13:06:37.510 INFO:tasks.cephadm:Adding node-exporter.b on vm08 2026-03-10T13:06:37.510 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -- ceph orch apply node-exporter '2;vm00=a;vm08=b' 2026-03-10T13:06:37.725 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:37 vm08 ceph-mon[49535]: osdmap e41: 8 total, 8 up, 8 in 2026-03-10T13:06:37.725 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:37 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/1274846834' entity='client.admin' cmd=[{"prefix": "osd pool get", "pool": ".mgr", "var": "pg_num"}]: dispatch 2026-03-10T13:06:37.725 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:37 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:37.725 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:37 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:06:37.725 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:37 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:06:37.725 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:37 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:06:37.725 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:37 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:37.725 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:37 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mgr module enable", "module": "prometheus"}]: dispatch 2026-03-10T13:06:38.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:37 vm00 ceph-mon[51670]: osdmap e41: 8 total, 8 up, 8 in 2026-03-10T13:06:38.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:37 vm00 ceph-mon[51670]: from='client.? 
192.168.123.100:0/1274846834' entity='client.admin' cmd=[{"prefix": "osd pool get", "pool": ".mgr", "var": "pg_num"}]: dispatch 2026-03-10T13:06:38.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:37 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:38.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:37 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:06:38.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:37 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:06:38.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:37 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:06:38.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:37 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:38.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:37 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mgr module enable", "module": "prometheus"}]: dispatch 2026-03-10T13:06:38.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:37 vm00 ceph-mon[47364]: osdmap e41: 8 total, 8 up, 8 in 2026-03-10T13:06:38.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:37 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/1274846834' entity='client.admin' cmd=[{"prefix": "osd pool get", "pool": ".mgr", "var": "pg_num"}]: dispatch 2026-03-10T13:06:38.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:37 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:38.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:37 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:06:38.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:37 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:06:38.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:37 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:06:38.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:37 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:38.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:37 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd=[{"prefix": "mgr module enable", "module": "prometheus"}]: dispatch 2026-03-10T13:06:38.005 INFO:teuthology.orchestra.run.vm08.stdout:Scheduled node-exporter update... 
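Just before the monitoring daemons are applied, the harness confirms OSD health by running `ceph osd stat -f json` and `ceph osd dump --format=json` through `cephadm shell` and reading back num_osds/num_up_osds/num_in_osds (8/8/8 above). A minimal sketch of that check, reusing the cephadm path, image, and fsid from this run (hypothetical helper, not the teuthology code itself):

```python
#!/usr/bin/env python3
"""Sketch: wait until every OSD reported by `ceph osd stat -f json` is up and in.

Assumptions taken from the log above (not from teuthology itself):
  - cephadm is available at /home/ubuntu/cephtest/cephadm
  - the bootstrap image and fsid match the ones used in this run
"""
import json
import subprocess
import time

CEPHADM = "/home/ubuntu/cephtest/cephadm"
IMAGE = "quay.io/ceph/ceph:v17.2.0"
FSID = "98a3dada-1c81-11f1-89c9-d57c120f78d5"


def ceph(*args: str) -> str:
    """Run a ceph command inside `cephadm shell` and return its stdout."""
    cmd = ["sudo", CEPHADM, "--image", IMAGE, "shell", "--fsid", FSID, "--", "ceph", *args]
    return subprocess.run(cmd, check=True, capture_output=True, text=True).stdout


def wait_for_osds(timeout: int = 600, interval: int = 10) -> dict:
    """Poll `ceph osd stat -f json` until num_osds == num_up_osds == num_in_osds."""
    deadline = time.time() + timeout
    while True:
        stat = json.loads(ceph("osd", "stat", "-f", "json"))
        if stat["num_osds"] == stat["num_up_osds"] == stat["num_in_osds"] > 0:
            return stat  # e.g. {"epoch": 40, "num_osds": 8, "num_up_osds": 8, ...}
        if time.time() > deadline:
            raise TimeoutError(f"OSDs never settled: {stat}")
        time.sleep(interval)


if __name__ == "__main__":
    print(wait_for_osds())
```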
2026-03-10T13:06:38.072 DEBUG:teuthology.orchestra.run.vm00:node-exporter.a> sudo journalctl -f -n 0 -u ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@node-exporter.a.service 2026-03-10T13:06:38.074 DEBUG:teuthology.orchestra.run.vm08:node-exporter.b> sudo journalctl -f -n 0 -u ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@node-exporter.b.service 2026-03-10T13:06:38.076 INFO:tasks.cephadm:Adding alertmanager.a on vm00 2026-03-10T13:06:38.076 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -- ceph orch apply alertmanager '1;vm00=a' 2026-03-10T13:06:38.515 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:06:38 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[50837]: ignoring --setuser ceph since I am not root 2026-03-10T13:06:38.515 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:06:38 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[50837]: ignoring --setgroup ceph since I am not root 2026-03-10T13:06:38.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:38 vm00 ceph-mon[47364]: from='client.24272 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "prometheus", "placement": "1;vm08=a", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:06:38.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:38 vm00 ceph-mon[47364]: Saving service prometheus spec with placement vm08=a;count:1 2026-03-10T13:06:38.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:38 vm00 ceph-mon[47364]: pgmap v83: 1 pgs: 1 active+clean; 449 KiB data, 46 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:06:38.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:38 vm00 ceph-mon[47364]: osdmap e42: 8 total, 8 up, 8 in 2026-03-10T13:06:38.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:38 vm00 ceph-mon[47364]: from='client.24278 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "node-exporter", "placement": "2;vm00=a;vm08=b", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:06:38.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:38 vm00 ceph-mon[47364]: Saving service node-exporter spec with placement vm00=a;vm08=b;count:2 2026-03-10T13:06:38.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:38 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:38.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:38 vm00 ceph-mon[47364]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd='[{"prefix": "mgr module enable", "module": "prometheus"}]': finished 2026-03-10T13:06:38.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:38 vm00 ceph-mon[47364]: mgrmap e16: y(active, since 105s), standbys: x 2026-03-10T13:06:38.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:38 vm00 ceph-mon[51670]: from='client.24272 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "prometheus", "placement": "1;vm08=a", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:06:38.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:38 vm00 ceph-mon[51670]: Saving service prometheus spec with placement vm08=a;count:1 2026-03-10T13:06:38.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:38 vm00 ceph-mon[51670]: pgmap v83: 1 pgs: 1 active+clean; 449 KiB data, 46 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:06:38.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:38 vm00 ceph-mon[51670]: osdmap e42: 8 total, 8 
up, 8 in 2026-03-10T13:06:38.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:38 vm00 ceph-mon[51670]: from='client.24278 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "node-exporter", "placement": "2;vm00=a;vm08=b", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:06:38.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:38 vm00 ceph-mon[51670]: Saving service node-exporter spec with placement vm00=a;vm08=b;count:2 2026-03-10T13:06:38.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:38 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:38.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:38 vm00 ceph-mon[51670]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd='[{"prefix": "mgr module enable", "module": "prometheus"}]': finished 2026-03-10T13:06:38.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:38 vm00 ceph-mon[51670]: mgrmap e16: y(active, since 105s), standbys: x 2026-03-10T13:06:38.754 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:06:38 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: ignoring --setuser ceph since I am not root 2026-03-10T13:06:38.754 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:06:38 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: ignoring --setgroup ceph since I am not root 2026-03-10T13:06:38.754 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:06:38 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: 2026-03-10T13:06:38.608+0000 7ff230cb3000 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member 2026-03-10T13:06:38.754 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:06:38 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: 2026-03-10T13:06:38.662+0000 7ff230cb3000 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member 2026-03-10T13:06:38.771 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:06:38 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[50837]: 2026-03-10T13:06:38.616+0000 7fa40f590000 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member 2026-03-10T13:06:38.771 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:06:38 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[50837]: 2026-03-10T13:06:38.672+0000 7fa40f590000 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member 2026-03-10T13:06:38.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:38 vm08 ceph-mon[49535]: from='client.24272 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "prometheus", "placement": "1;vm08=a", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:06:38.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:38 vm08 ceph-mon[49535]: Saving service prometheus spec with placement vm08=a;count:1 2026-03-10T13:06:38.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:38 vm08 ceph-mon[49535]: pgmap v83: 1 pgs: 1 active+clean; 449 KiB data, 46 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:06:38.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:38 vm08 ceph-mon[49535]: osdmap e42: 8 total, 8 up, 8 in 2026-03-10T13:06:38.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:38 vm08 ceph-mon[49535]: from='client.24278 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "node-exporter", "placement": "2;vm00=a;vm08=b", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:06:38.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:38 vm08 ceph-mon[49535]: Saving service node-exporter spec with placement vm00=a;vm08=b;count:2 
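The `ceph orch apply` calls above use compact placement strings such as '1;vm08=a' and '2;vm00=a;vm08=b', which the orchestrator records as "placement vm08=a;count:1" and "placement vm00=a;vm08=b;count:2". An illustrative sketch of how a '<count>;host=id;...' string decomposes (format inferred from the log; this is not the orchestrator's actual parser):

```python
from dataclasses import dataclass, field


@dataclass
class Placement:
    """Decomposed form of a '<count>;host=daemon_id;...' placement string."""
    count: int
    hosts: dict[str, str] = field(default_factory=dict)  # host -> daemon id


def parse_placement(spec: str) -> Placement:
    """Split e.g. '2;vm00=a;vm08=b' into a count and per-host daemon ids."""
    count_part, *host_parts = spec.split(";")
    hosts = dict(part.split("=", 1) for part in host_parts if part)
    return Placement(count=int(count_part), hosts=hosts)


# Examples taken from the `ceph orch apply` calls in the log above:
assert parse_placement("1;vm08=a") == Placement(1, {"vm08": "a"})
assert parse_placement("2;vm00=a;vm08=b") == Placement(2, {"vm00": "a", "vm08": "b"})
```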
2026-03-10T13:06:38.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:38 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' 2026-03-10T13:06:38.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:38 vm08 ceph-mon[49535]: from='mgr.14152 192.168.123.100:0/1655457509' entity='mgr.y' cmd='[{"prefix": "mgr module enable", "module": "prometheus"}]': finished 2026-03-10T13:06:38.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:38 vm08 ceph-mon[49535]: mgrmap e16: y(active, since 105s), standbys: x 2026-03-10T13:06:39.443 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:06:39 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: 2026-03-10T13:06:39.063+0000 7ff230cb3000 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-10T13:06:39.443 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:06:39 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: 2026-03-10T13:06:39.440+0000 7ff230cb3000 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member 2026-03-10T13:06:39.462 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:06:39 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[50837]: 2026-03-10T13:06:39.081+0000 7fa40f590000 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-10T13:06:39.753 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:06:39 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: 2026-03-10T13:06:39.595+0000 7ff230cb3000 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member 2026-03-10T13:06:39.753 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:06:39 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: 2026-03-10T13:06:39.652+0000 7ff230cb3000 -1 mgr[py] Module influx has missing NOTIFY_TYPES member 2026-03-10T13:06:39.771 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:06:39 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[50837]: 2026-03-10T13:06:39.459+0000 7fa40f590000 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member 2026-03-10T13:06:39.771 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:06:39 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[50837]: 2026-03-10T13:06:39.614+0000 7fa40f590000 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member 2026-03-10T13:06:39.771 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:06:39 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[50837]: 2026-03-10T13:06:39.672+0000 7fa40f590000 -1 mgr[py] Module influx has missing NOTIFY_TYPES member 2026-03-10T13:06:40.253 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:06:39 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: 2026-03-10T13:06:39.820+0000 7ff230cb3000 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member 2026-03-10T13:06:40.271 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:06:39 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[50837]: 2026-03-10T13:06:39.854+0000 7fa40f590000 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member 2026-03-10T13:06:40.683 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:06:40 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: 2026-03-10T13:06:40.424+0000 7ff230cb3000 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-10T13:06:40.683 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:06:40 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: 2026-03-10T13:06:40.620+0000 7ff230cb3000 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-10T13:06:40.718 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:06:40 vm08 
ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[50837]: 2026-03-10T13:06:40.459+0000 7fa40f590000 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-10T13:06:40.718 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:06:40 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[50837]: 2026-03-10T13:06:40.654+0000 7fa40f590000 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-10T13:06:41.003 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:06:40 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: 2026-03-10T13:06:40.680+0000 7ff230cb3000 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 2026-03-10T13:06:41.003 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:06:40 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: 2026-03-10T13:06:40.741+0000 7ff230cb3000 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-10T13:06:41.003 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:06:40 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: 2026-03-10T13:06:40.810+0000 7ff230cb3000 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 2026-03-10T13:06:41.003 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:06:40 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: 2026-03-10T13:06:40.873+0000 7ff230cb3000 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-10T13:06:41.021 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:06:40 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[50837]: 2026-03-10T13:06:40.715+0000 7fa40f590000 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 2026-03-10T13:06:41.021 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:06:40 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[50837]: 2026-03-10T13:06:40.781+0000 7fa40f590000 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-10T13:06:41.021 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:06:40 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[50837]: 2026-03-10T13:06:40.847+0000 7fa40f590000 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 2026-03-10T13:06:41.021 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:06:40 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[50837]: 2026-03-10T13:06:40.907+0000 7fa40f590000 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-10T13:06:41.503 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:06:41 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: 2026-03-10T13:06:41.194+0000 7ff230cb3000 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member 2026-03-10T13:06:41.503 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:06:41 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: 2026-03-10T13:06:41.271+0000 7ff230cb3000 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-10T13:06:41.521 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:06:41 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[50837]: 2026-03-10T13:06:41.236+0000 7fa40f590000 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member 2026-03-10T13:06:41.521 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:06:41 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[50837]: 2026-03-10T13:06:41.314+0000 7fa40f590000 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-10T13:06:42.125 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:06:41 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: 2026-03-10T13:06:41.862+0000 7ff230cb3000 -1 mgr[py] Module rook has missing 
NOTIFY_TYPES member 2026-03-10T13:06:42.125 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:06:41 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: 2026-03-10T13:06:41.925+0000 7ff230cb3000 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member 2026-03-10T13:06:42.125 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:06:41 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: 2026-03-10T13:06:41.992+0000 7ff230cb3000 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-10T13:06:42.188 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:06:41 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[50837]: 2026-03-10T13:06:41.917+0000 7fa40f590000 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-10T13:06:42.188 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:06:41 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[50837]: 2026-03-10T13:06:41.981+0000 7fa40f590000 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member 2026-03-10T13:06:42.188 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:06:42 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[50837]: 2026-03-10T13:06:42.048+0000 7fa40f590000 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-10T13:06:42.381 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:06:42 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: 2026-03-10T13:06:42.122+0000 7ff230cb3000 -1 mgr[py] Module status has missing NOTIFY_TYPES member 2026-03-10T13:06:42.382 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:06:42 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: 2026-03-10T13:06:42.187+0000 7ff230cb3000 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 2026-03-10T13:06:42.382 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:06:42 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: 2026-03-10T13:06:42.286+0000 7ff230cb3000 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member 2026-03-10T13:06:42.452 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:06:42 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[50837]: 2026-03-10T13:06:42.185+0000 7fa40f590000 -1 mgr[py] Module status has missing NOTIFY_TYPES member 2026-03-10T13:06:42.452 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:06:42 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[50837]: 2026-03-10T13:06:42.250+0000 7fa40f590000 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 2026-03-10T13:06:42.452 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:06:42 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[50837]: 2026-03-10T13:06:42.355+0000 7fa40f590000 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member 2026-03-10T13:06:42.705 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:06:42 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: 2026-03-10T13:06:42.379+0000 7ff230cb3000 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member 2026-03-10T13:06:42.771 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:06:42 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[50837]: 2026-03-10T13:06:42.449+0000 7fa40f590000 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member 2026-03-10T13:06:43.003 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:06:42 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: 2026-03-10T13:06:42.703+0000 7ff230cb3000 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-10T13:06:43.003 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:06:42 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: 
2026-03-10T13:06:42.763+0000 7ff230cb3000 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-10T13:06:43.271 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:06:42 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[50837]: 2026-03-10T13:06:42.779+0000 7fa40f590000 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-10T13:06:43.271 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:06:42 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[50837]: 2026-03-10T13:06:42.844+0000 7fa40f590000 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-10T13:06:43.271 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:06:42 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[50837]: [10/Mar/2026:13:06:42] ENGINE Bus STARTING 2026-03-10T13:06:43.271 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:06:42 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[50837]: CherryPy Checker: 2026-03-10T13:06:43.271 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:06:42 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[50837]: The Application mounted at '' has an empty config. 2026-03-10T13:06:43.271 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:06:42 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[50837]: 2026-03-10T13:06:43.271 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:06:42 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[50837]: [10/Mar/2026:13:06:42] ENGINE Serving on http://:::9283 2026-03-10T13:06:43.271 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:06:42 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[50837]: [10/Mar/2026:13:06:42] ENGINE Bus STARTED 2026-03-10T13:06:43.809 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:43 vm08 ceph-mon[49535]: Active manager daemon y restarted 2026-03-10T13:06:43.810 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:43 vm08 ceph-mon[49535]: Activating manager daemon y 2026-03-10T13:06:43.810 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:43 vm08 ceph-mon[49535]: osdmap e43: 8 total, 8 up, 8 in 2026-03-10T13:06:43.810 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:43 vm08 ceph-mon[49535]: Standby manager daemon x restarted 2026-03-10T13:06:43.810 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:43 vm08 ceph-mon[49535]: Standby manager daemon x started 2026-03-10T13:06:43.810 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:43 vm08 ceph-mon[49535]: from='mgr.? 192.168.123.108:0/1726549828' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-10T13:06:43.810 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:43 vm08 ceph-mon[49535]: from='mgr.? 192.168.123.108:0/1726549828' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-10T13:06:43.810 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:43 vm08 ceph-mon[49535]: from='mgr.? 192.168.123.108:0/1726549828' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-10T13:06:43.810 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:43 vm08 ceph-mon[49535]: from='mgr.? 
192.168.123.108:0/1726549828' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-10T13:06:43.810 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:43 vm00 ceph-mon[47364]: Active manager daemon y restarted 2026-03-10T13:06:43.810 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:43 vm00 ceph-mon[47364]: Activating manager daemon y 2026-03-10T13:06:43.810 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:43 vm00 ceph-mon[47364]: osdmap e43: 8 total, 8 up, 8 in 2026-03-10T13:06:43.810 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:43 vm00 ceph-mon[47364]: Standby manager daemon x restarted 2026-03-10T13:06:43.810 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:43 vm00 ceph-mon[47364]: Standby manager daemon x started 2026-03-10T13:06:43.810 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:43 vm00 ceph-mon[47364]: from='mgr.? 192.168.123.108:0/1726549828' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-10T13:06:43.810 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:43 vm00 ceph-mon[47364]: from='mgr.? 192.168.123.108:0/1726549828' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-10T13:06:43.810 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:43 vm00 ceph-mon[47364]: from='mgr.? 192.168.123.108:0/1726549828' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-10T13:06:43.810 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:43 vm00 ceph-mon[47364]: from='mgr.? 192.168.123.108:0/1726549828' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-10T13:06:43.810 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:06:43 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: [10/Mar/2026:13:06:43] ENGINE Bus STARTING 2026-03-10T13:06:43.810 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:43 vm00 ceph-mon[51670]: Active manager daemon y restarted 2026-03-10T13:06:43.810 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:43 vm00 ceph-mon[51670]: Activating manager daemon y 2026-03-10T13:06:43.811 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:43 vm00 ceph-mon[51670]: osdmap e43: 8 total, 8 up, 8 in 2026-03-10T13:06:43.811 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:43 vm00 ceph-mon[51670]: Standby manager daemon x restarted 2026-03-10T13:06:43.811 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:43 vm00 ceph-mon[51670]: Standby manager daemon x started 2026-03-10T13:06:43.811 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:43 vm00 ceph-mon[51670]: from='mgr.? 192.168.123.108:0/1726549828' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-10T13:06:43.811 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:43 vm00 ceph-mon[51670]: from='mgr.? 192.168.123.108:0/1726549828' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-10T13:06:43.811 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:43 vm00 ceph-mon[51670]: from='mgr.? 192.168.123.108:0/1726549828' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-10T13:06:43.811 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:43 vm00 ceph-mon[51670]: from='mgr.? 
192.168.123.108:0/1726549828' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-10T13:06:44.085 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:06:43 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: [10/Mar/2026:13:06:43] ENGINE Bus STARTING 2026-03-10T13:06:44.085 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:06:43 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: CherryPy Checker: 2026-03-10T13:06:44.085 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:06:43 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: The Application mounted at '' has an empty config. 2026-03-10T13:06:44.085 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:06:43 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: 2026-03-10T13:06:44.085 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:06:43 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: [10/Mar/2026:13:06:43] ENGINE Serving on http://:::9283 2026-03-10T13:06:44.085 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:06:43 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: [10/Mar/2026:13:06:43] ENGINE Bus STARTED 2026-03-10T13:06:44.085 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:06:43 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: [10/Mar/2026:13:06:43] ENGINE Serving on https://192.168.123.100:7150 2026-03-10T13:06:44.085 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:06:43 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: [10/Mar/2026:13:06:43] ENGINE Bus STARTED 2026-03-10T13:06:44.610 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:44 vm08 ceph-mon[49535]: mgrmap e17: y(active, starting, since 0.796095s), standbys: x 2026-03-10T13:06:44.610 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:44 vm08 ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T13:06:44.610 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:44 vm08 ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T13:06:44.610 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:44 vm08 ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T13:06:44.610 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:44 vm08 ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-10T13:06:44.610 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:44 vm08 ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-10T13:06:44.610 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:44 vm08 ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T13:06:44.610 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:44 vm08 ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T13:06:44.610 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:44 vm08 ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T13:06:44.610 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:44 vm08 
ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T13:06:44.610 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:44 vm08 ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T13:06:44.610 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:44 vm08 ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T13:06:44.610 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:44 vm08 ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T13:06:44.610 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:44 vm08 ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T13:06:44.610 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:44 vm08 ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-10T13:06:44.610 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:44 vm08 ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-10T13:06:44.610 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:44 vm08 ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-10T13:06:44.610 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:44 vm08 ceph-mon[49535]: Manager daemon y is now available 2026-03-10T13:06:44.610 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:44 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:06:44.610 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:44 vm08 ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:06:44.610 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:44 vm08 ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:06:44.610 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:44 vm08 ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:06:44.610 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:44 vm08 ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T13:06:44.610 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:44 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T13:06:44.610 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:44 vm08 ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:06:44.610 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:44 vm08 ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T13:06:44.610 
INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:44 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T13:06:44.610 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:44 vm08 ceph-mon[49535]: [10/Mar/2026:13:06:43] ENGINE Bus STARTING 2026-03-10T13:06:44.610 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:44 vm08 ceph-mon[49535]: [10/Mar/2026:13:06:43] ENGINE Serving on https://192.168.123.100:7150 2026-03-10T13:06:44.610 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:44 vm08 ceph-mon[49535]: [10/Mar/2026:13:06:43] ENGINE Bus STARTED 2026-03-10T13:06:44.610 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:44 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:06:44.610 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:44 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:06:44.610 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:44 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:06:44.611 INFO:teuthology.orchestra.run.vm08.stdout:Scheduled alertmanager update... 2026-03-10T13:06:44.671 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:44 vm00 ceph-mon[47364]: mgrmap e17: y(active, starting, since 0.796095s), standbys: x 2026-03-10T13:06:44.671 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:44 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T13:06:44.671 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:44 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T13:06:44.671 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:44 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T13:06:44.671 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:44 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-10T13:06:44.671 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:44 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-10T13:06:44.671 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:44 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T13:06:44.671 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:44 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T13:06:44.671 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:44 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T13:06:44.671 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:44 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T13:06:44.671 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:44 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T13:06:44.671 
INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:44 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T13:06:44.671 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:44 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T13:06:44.671 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:44 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T13:06:44.671 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:44 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-10T13:06:44.671 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:44 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-10T13:06:44.671 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:44 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-10T13:06:44.672 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:44 vm00 ceph-mon[47364]: Manager daemon y is now available 2026-03-10T13:06:44.672 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:44 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:06:44.672 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:44 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:06:44.672 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:44 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:06:44.672 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:44 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:06:44.672 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:44 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T13:06:44.672 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:44 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T13:06:44.672 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:44 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:06:44.672 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:44 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T13:06:44.672 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:44 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T13:06:44.672 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:44 vm00 ceph-mon[47364]: [10/Mar/2026:13:06:43] ENGINE Bus STARTING 
2026-03-10T13:06:44.672 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:44 vm00 ceph-mon[47364]: [10/Mar/2026:13:06:43] ENGINE Serving on https://192.168.123.100:7150 2026-03-10T13:06:44.672 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:44 vm00 ceph-mon[47364]: [10/Mar/2026:13:06:43] ENGINE Bus STARTED 2026-03-10T13:06:44.672 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:44 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:06:44.672 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:44 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:06:44.672 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:44 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:06:44.672 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:44 vm00 ceph-mon[51670]: mgrmap e17: y(active, starting, since 0.796095s), standbys: x 2026-03-10T13:06:44.672 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:44 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T13:06:44.672 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:44 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T13:06:44.672 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:44 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T13:06:44.672 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:44 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-10T13:06:44.672 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:44 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-10T13:06:44.672 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:44 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T13:06:44.672 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:44 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T13:06:44.672 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:44 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T13:06:44.672 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:44 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T13:06:44.672 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:44 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T13:06:44.672 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:44 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T13:06:44.672 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:44 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T13:06:44.672 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 
13:06:44 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T13:06:44.672 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:44 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-10T13:06:44.672 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:44 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-10T13:06:44.672 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:44 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-10T13:06:44.672 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:44 vm00 ceph-mon[51670]: Manager daemon y is now available 2026-03-10T13:06:44.672 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:44 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:06:44.672 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:44 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:06:44.672 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:44 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:06:44.672 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:44 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:06:44.672 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:44 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T13:06:44.672 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:44 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T13:06:44.672 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:44 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:06:44.672 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:44 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T13:06:44.672 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:44 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T13:06:44.672 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:44 vm00 ceph-mon[51670]: [10/Mar/2026:13:06:43] ENGINE Bus STARTING 2026-03-10T13:06:44.672 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:44 vm00 ceph-mon[51670]: [10/Mar/2026:13:06:43] ENGINE Serving on https://192.168.123.100:7150 2026-03-10T13:06:44.672 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:44 vm00 ceph-mon[51670]: [10/Mar/2026:13:06:43] ENGINE Bus STARTED 2026-03-10T13:06:44.672 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:44 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:06:44.672 
INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:44 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:06:44.672 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:44 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:06:44.684 DEBUG:teuthology.orchestra.run.vm00:alertmanager.a> sudo journalctl -f -n 0 -u ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@alertmanager.a.service 2026-03-10T13:06:44.686 INFO:tasks.cephadm:Adding grafana.a on vm08 2026-03-10T13:06:44.686 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -- ceph orch apply grafana '1;vm08=a' 2026-03-10T13:06:45.258 INFO:teuthology.orchestra.run.vm08.stdout:Scheduled grafana update... 2026-03-10T13:06:45.312 DEBUG:teuthology.orchestra.run.vm08:grafana.a> sudo journalctl -f -n 0 -u ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@grafana.a.service 2026-03-10T13:06:45.313 INFO:tasks.cephadm:Setting up client nodes... 2026-03-10T13:06:45.314 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -- ceph auth get-or-create client.0 mon 'allow *' osd 'allow *' mds 'allow *' mgr 'allow *' 2026-03-10T13:06:45.755 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:45 vm00 ceph-mon[47364]: mgrmap e18: y(active, since 1.81301s), standbys: x 2026-03-10T13:06:45.755 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:45 vm00 ceph-mon[47364]: from='client.24289 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "alertmanager", "placement": "1;vm00=a", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:06:45.755 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:45 vm00 ceph-mon[47364]: pgmap v3: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:06:45.755 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:45 vm00 ceph-mon[47364]: Saving service alertmanager spec with placement vm00=a;count:1 2026-03-10T13:06:45.755 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:45 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:06:45.755 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:45 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:06:45.755 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:45 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:06:45.755 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:45 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:06:45.755 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:45 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.5", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:06:45.755 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:45 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.5", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:06:45.755 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:45 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' 
cmd=[{"prefix": "config rm", "who": "osd.6", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:06:45.755 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:45 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.6", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:06:45.755 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:45 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.7", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:06:45.755 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:45 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.7", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:06:45.755 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:45 vm00 ceph-mon[47364]: Adjusting osd_memory_target on vm08 to 65803k 2026-03-10T13:06:45.755 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:45 vm00 ceph-mon[47364]: Unable to set osd_memory_target on vm08 to 67382681: error parsing value: Value '67382681' is below minimum 939524096 2026-03-10T13:06:45.755 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:45 vm00 ceph-mon[47364]: Updating vm08:/etc/ceph/ceph.conf 2026-03-10T13:06:45.755 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:45 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:06:45.755 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:45 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:06:45.755 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:45 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:06:45.755 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:45 vm00 ceph-mon[47364]: Updating vm00:/etc/ceph/ceph.conf 2026-03-10T13:06:45.755 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:45 vm00 ceph-mon[47364]: Updating vm00:/etc/ceph/ceph.client.admin.keyring 2026-03-10T13:06:45.755 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:45 vm00 ceph-mon[47364]: Updating vm08:/etc/ceph/ceph.client.admin.keyring 2026-03-10T13:06:45.755 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:45 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:06:45.755 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:45 vm00 ceph-mon[47364]: from='client.24305 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "grafana", "placement": "1;vm08=a", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:06:45.755 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:45 vm00 ceph-mon[47364]: Saving service grafana spec with placement vm08=a;count:1 2026-03-10T13:06:45.755 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:45 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:06:45.755 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:45 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:06:45.755 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:45 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:06:45.755 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:45 vm00 ceph-mon[47364]: Deploying daemon node-exporter.a on vm00 2026-03-10T13:06:45.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:45 vm00 ceph-mon[51670]: mgrmap e18: y(active, since 
1.81301s), standbys: x 2026-03-10T13:06:45.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:45 vm00 ceph-mon[51670]: from='client.24289 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "alertmanager", "placement": "1;vm00=a", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:06:45.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:45 vm00 ceph-mon[51670]: pgmap v3: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:06:45.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:45 vm00 ceph-mon[51670]: Saving service alertmanager spec with placement vm00=a;count:1 2026-03-10T13:06:45.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:45 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:06:45.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:45 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:06:45.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:45 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:06:45.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:45 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:06:45.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:45 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.5", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:06:45.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:45 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.5", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:06:45.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:45 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.6", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:06:45.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:45 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.6", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:06:45.756 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:45 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.7", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:06:45.756 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:45 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.7", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:06:45.756 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:45 vm00 ceph-mon[51670]: Adjusting osd_memory_target on vm08 to 65803k 2026-03-10T13:06:45.756 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:45 vm00 ceph-mon[51670]: Unable to set osd_memory_target on vm08 to 67382681: error parsing value: Value '67382681' is below minimum 939524096 2026-03-10T13:06:45.756 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:45 vm00 ceph-mon[51670]: Updating vm08:/etc/ceph/ceph.conf 2026-03-10T13:06:45.756 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:45 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:06:45.756 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:45 vm00 ceph-mon[51670]: from='mgr.24298 
192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:06:45.756 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:45 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:06:45.756 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:45 vm00 ceph-mon[51670]: Updating vm00:/etc/ceph/ceph.conf 2026-03-10T13:06:45.756 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:45 vm00 ceph-mon[51670]: Updating vm00:/etc/ceph/ceph.client.admin.keyring 2026-03-10T13:06:45.756 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:45 vm00 ceph-mon[51670]: Updating vm08:/etc/ceph/ceph.client.admin.keyring 2026-03-10T13:06:45.756 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:45 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:06:45.756 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:45 vm00 ceph-mon[51670]: from='client.24305 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "grafana", "placement": "1;vm08=a", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:06:45.756 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:45 vm00 ceph-mon[51670]: Saving service grafana spec with placement vm08=a;count:1 2026-03-10T13:06:45.756 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:45 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:06:45.756 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:45 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:06:45.756 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:45 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:06:45.756 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:45 vm00 ceph-mon[51670]: Deploying daemon node-exporter.a on vm00 2026-03-10T13:06:46.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:45 vm08 ceph-mon[49535]: mgrmap e18: y(active, since 1.81301s), standbys: x 2026-03-10T13:06:46.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:45 vm08 ceph-mon[49535]: from='client.24289 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "alertmanager", "placement": "1;vm00=a", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:06:46.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:45 vm08 ceph-mon[49535]: pgmap v3: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:06:46.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:45 vm08 ceph-mon[49535]: Saving service alertmanager spec with placement vm00=a;count:1 2026-03-10T13:06:46.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:45 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:06:46.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:45 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:06:46.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:45 vm08 ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:06:46.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:45 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:06:46.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:45 vm08 ceph-mon[49535]: from='mgr.24298 
192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.5", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:06:46.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:45 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.5", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:06:46.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:45 vm08 ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.6", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:06:46.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:45 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.6", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:06:46.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:45 vm08 ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.7", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:06:46.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:45 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.7", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:06:46.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:45 vm08 ceph-mon[49535]: Adjusting osd_memory_target on vm08 to 65803k 2026-03-10T13:06:46.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:45 vm08 ceph-mon[49535]: Unable to set osd_memory_target on vm08 to 67382681: error parsing value: Value '67382681' is below minimum 939524096 2026-03-10T13:06:46.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:45 vm08 ceph-mon[49535]: Updating vm08:/etc/ceph/ceph.conf 2026-03-10T13:06:46.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:45 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:06:46.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:45 vm08 ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:06:46.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:45 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:06:46.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:45 vm08 ceph-mon[49535]: Updating vm00:/etc/ceph/ceph.conf 2026-03-10T13:06:46.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:45 vm08 ceph-mon[49535]: Updating vm00:/etc/ceph/ceph.client.admin.keyring 2026-03-10T13:06:46.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:45 vm08 ceph-mon[49535]: Updating vm08:/etc/ceph/ceph.client.admin.keyring 2026-03-10T13:06:46.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:45 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:06:46.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:45 vm08 ceph-mon[49535]: from='client.24305 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "grafana", "placement": "1;vm08=a", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:06:46.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:45 vm08 ceph-mon[49535]: Saving service grafana spec with placement vm08=a;count:1 2026-03-10T13:06:46.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:45 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:06:46.022 
INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:45 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:06:46.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:45 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:06:46.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:45 vm08 ceph-mon[49535]: Deploying daemon node-exporter.a on vm00 2026-03-10T13:06:46.064 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:06:46 vm00 systemd[1]: Starting Ceph node-exporter.a for 98a3dada-1c81-11f1-89c9-d57c120f78d5... 2026-03-10T13:06:46.065 INFO:teuthology.orchestra.run.vm00.stdout:[client.0] 2026-03-10T13:06:46.065 INFO:teuthology.orchestra.run.vm00.stdout: key = AQBmF7BpFdJJAxAAWAcqa9TsJoCpsGlRSARRYA== 2026-03-10T13:06:46.130 DEBUG:teuthology.orchestra.run.vm00:> set -ex 2026-03-10T13:06:46.130 DEBUG:teuthology.orchestra.run.vm00:> sudo dd of=/etc/ceph/ceph.client.0.keyring 2026-03-10T13:06:46.130 DEBUG:teuthology.orchestra.run.vm00:> sudo chmod 0644 /etc/ceph/ceph.client.0.keyring 2026-03-10T13:06:46.166 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -- ceph auth get-or-create client.1 mon 'allow *' osd 'allow *' mds 'allow *' mgr 'allow *' 2026-03-10T13:06:46.503 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:06:46 vm00 bash[65902]: Trying to pull quay.io/prometheus/node-exporter:v1.3.1... 2026-03-10T13:06:46.662 INFO:teuthology.orchestra.run.vm08.stdout:[client.1] 2026-03-10T13:06:46.662 INFO:teuthology.orchestra.run.vm08.stdout: key = AQBmF7BpDvkpJxAAJPl3iRHjRflg8RaoJU9zRQ== 2026-03-10T13:06:46.695 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:46 vm08 ceph-mon[49535]: pgmap v4: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:06:46.695 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:46 vm08 ceph-mon[49535]: mgrmap e19: y(active, since 2s), standbys: x 2026-03-10T13:06:46.695 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:46 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/1334796153' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch 2026-03-10T13:06:46.695 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:46 vm08 ceph-mon[49535]: from='client.? ' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch 2026-03-10T13:06:46.695 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:46 vm08 ceph-mon[49535]: from='client.? ' entity='client.admin' cmd='[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]': finished 2026-03-10T13:06:46.728 DEBUG:teuthology.orchestra.run.vm08:> set -ex 2026-03-10T13:06:46.728 DEBUG:teuthology.orchestra.run.vm08:> sudo dd of=/etc/ceph/ceph.client.1.keyring 2026-03-10T13:06:46.728 DEBUG:teuthology.orchestra.run.vm08:> sudo chmod 0644 /etc/ceph/ceph.client.1.keyring 2026-03-10T13:06:46.764 INFO:tasks.ceph:Waiting until ceph daemons up and pgs clean... 
2026-03-10T13:06:46.764 INFO:tasks.cephadm.ceph_manager.ceph:waiting for mgr available 2026-03-10T13:06:46.764 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -- ceph mgr dump --format=json 2026-03-10T13:06:46.908 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/mon.a/config 2026-03-10T13:06:46.934 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:46 vm00 ceph-mon[47364]: pgmap v4: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:06:46.934 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:46 vm00 ceph-mon[47364]: mgrmap e19: y(active, since 2s), standbys: x 2026-03-10T13:06:46.934 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:46 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/1334796153' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch 2026-03-10T13:06:46.934 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:46 vm00 ceph-mon[47364]: from='client.? ' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch 2026-03-10T13:06:46.934 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:46 vm00 ceph-mon[47364]: from='client.? ' entity='client.admin' cmd='[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]': finished 2026-03-10T13:06:46.934 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:46 vm00 ceph-mon[51670]: pgmap v4: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:06:46.934 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:46 vm00 ceph-mon[51670]: mgrmap e19: y(active, since 2s), standbys: x 2026-03-10T13:06:46.934 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:46 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/1334796153' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch 2026-03-10T13:06:46.934 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:46 vm00 ceph-mon[51670]: from='client.? ' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch 2026-03-10T13:06:46.934 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:46 vm00 ceph-mon[51670]: from='client.? 
' entity='client.admin' cmd='[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]': finished 2026-03-10T13:06:47.270 INFO:teuthology.orchestra.run.vm00.stdout: 2026-03-10T13:06:47.334 INFO:teuthology.orchestra.run.vm00.stdout:{"epoch":19,"active_gid":24298,"active_name":"y","active_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6800","nonce":1017778394},{"type":"v1","addr":"192.168.123.100:6801","nonce":1017778394}]},"active_addr":"192.168.123.100:6801/1017778394","active_change":"2026-03-10T13:06:42.767482+0000","active_mgr_features":4540138303579357183,"available":true,"standbys":[{"gid":14415,"name":"x","mgr_features":4540138303579357183,"available_modules":[{"name":"alerts","can_run":true,"error_string":"","module_options":{"interval":{"name":"interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"How frequently to reexamine health status","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"smtp_destination":{"name":"smtp_destination","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Email address to send alerts to","long_desc":"","tags":[],"see_also":[]},"smtp_from_name":{"name":"smtp_from_name","type":"str","level":"advanced","flags":1,"default_value":"Ceph","min":"","max":"","enum_allowed":[],"desc":"Email From: name","long_desc":"","tags":[],"see_also":[]},"smtp_host":{"name":"smtp_host","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_password":{"name":"smtp_password","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Password to authenticate with","long_desc":"","tags":[],"see_also":[]},"smtp_port":{"name":"smtp_port","type":"int","level":"advanced","flags":1,"default_value":"465","min":"","max":"","enum_allowed":[],"desc":"SMTP port","long_desc":"","tags":[],"see_also":[]},"smtp_sender":{"name":"smtp_sender","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP envelope sender","long_desc":"","tags":[],"see_also":[]},"smtp_ssl":{"name":"smtp_ssl","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Use SSL to connect to SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_user":{"name":"smtp_user","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"User to authenticate 
as","long_desc":"","tags":[],"see_also":[]}}},{"name":"balancer","can_run":true,"error_string":"","module_options":{"active":{"name":"active","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"automatically balance PGs across cluster","long_desc":"","tags":[],"see_also":[]},"begin_time":{"name":"begin_time","type":"str","level":"advanced","flags":1,"default_value":"0000","min":"","max":"","enum_allowed":[],"desc":"beginning time of day to automatically balance","long_desc":"This is a time of day in the format HHMM.","tags":[],"see_also":[]},"begin_weekday":{"name":"begin_weekday","type":"uint","level":"advanced","flags":1,"default_value":"0","min":"0","max":"7","enum_allowed":[],"desc":"Restrict automatic balancing to this day of the week or later","long_desc":"0 or 7 = Sunday, 1 = Monday, etc.","tags":[],"see_also":[]},"crush_compat_max_iterations":{"name":"crush_compat_max_iterations","type":"uint","level":"advanced","flags":1,"default_value":"25","min":"1","max":"250","enum_allowed":[],"desc":"maximum number of iterations to attempt optimization","long_desc":"","tags":[],"see_also":[]},"crush_compat_metrics":{"name":"crush_compat_metrics","type":"str","level":"advanced","flags":1,"default_value":"pgs,objects,bytes","min":"","max":"","enum_allowed":[],"desc":"metrics with which to calculate OSD utilization","long_desc":"Value is a list of one or more of \"pgs\", \"objects\", or \"bytes\", and indicates which metrics to use to balance utilization.","tags":[],"see_also":[]},"crush_compat_step":{"name":"crush_compat_step","type":"float","level":"advanced","flags":1,"default_value":"0.5","min":"0.001","max":"0.999","enum_allowed":[],"desc":"aggressiveness of optimization","long_desc":".99 is very aggressive, .01 is less aggressive","tags":[],"see_also":[]},"end_time":{"name":"end_time","type":"str","level":"advanced","flags":1,"default_value":"2400","min":"","max":"","enum_allowed":[],"desc":"ending time of day to automatically balance","long_desc":"This is a time of day in the format HHMM.","tags":[],"see_also":[]},"end_weekday":{"name":"end_weekday","type":"uint","level":"advanced","flags":1,"default_value":"7","min":"0","max":"7","enum_allowed":[],"desc":"Restrict automatic balancing to days of the week earlier than this","long_desc":"0 or 7 = Sunday, 1 = Monday, etc.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_score":{"name":"min_score","type":"float","level":"advanced","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"minimum score, below which no optimization is 
attempted","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":1,"default_value":"upmap","min":"","max":"","enum_allowed":["crush-compat","none","upmap"],"desc":"Balancer mode","long_desc":"","tags":[],"see_also":[]},"pool_ids":{"name":"pool_ids","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"pools which the automatic balancing will be limited to","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and attempt optimization","long_desc":"","tags":[],"see_also":[]},"upmap_max_deviation":{"name":"upmap_max_deviation","type":"int","level":"advanced","flags":1,"default_value":"5","min":"1","max":"","enum_allowed":[],"desc":"deviation below which no optimization is attempted","long_desc":"If the number of PGs are within this count then no optimization is attempted","tags":[],"see_also":[]},"upmap_max_optimizations":{"name":"upmap_max_optimizations","type":"uint","level":"advanced","flags":1,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"maximum upmap optimizations to make per attempt","long_desc":"","tags":[],"see_also":[]}}},{"name":"cephadm","can_run":true,"error_string":"","module_options":{"agent_down_multiplier":{"name":"agent_down_multiplier","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"","max":"","enum_allowed":[],"desc":"Multiplied by agent refresh rate to calculate how long agent must not report before being marked down","long_desc":"","tags":[],"see_also":[]},"agent_refresh_rate":{"name":"agent_refresh_rate","type":"secs","level":"advanced","flags":0,"default_value":"20","min":"","max":"","enum_allowed":[],"desc":"How often agent on each host will try to gather and send metadata","long_desc":"","tags":[],"see_also":[]},"agent_starting_port":{"name":"agent_starting_port","type":"int","level":"advanced","flags":0,"default_value":"4721","min":"","max":"","enum_allowed":[],"desc":"First port agent will try to bind to (will also try up to next 1000 subsequent ports if blocked)","long_desc":"","tags":[],"see_also":[]},"allow_ptrace":{"name":"allow_ptrace","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow SYS_PTRACE capability on ceph containers","long_desc":"The SYS_PTRACE capability is needed to attach to a process with gdb or strace. 
Enabling this options can allow debugging daemons that encounter problems at runtime.","tags":[],"see_also":[]},"autotune_interval":{"name":"autotune_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to autotune daemon memory","long_desc":"","tags":[],"see_also":[]},"autotune_memory_target_ratio":{"name":"autotune_memory_target_ratio","type":"float","level":"advanced","flags":0,"default_value":"0.7","min":"","max":"","enum_allowed":[],"desc":"ratio of total system memory to divide amongst autotuned daemons","long_desc":"","tags":[],"see_also":[]},"config_checks_enabled":{"name":"config_checks_enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable or disable the cephadm configuration analysis","long_desc":"","tags":[],"see_also":[]},"config_dashboard":{"name":"config_dashboard","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"manage configs like API endpoints in Dashboard.","long_desc":"","tags":[],"see_also":[]},"container_image_alertmanager":{"name":"container_image_alertmanager","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/alertmanager:v0.23.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_base":{"name":"container_image_base","type":"str","level":"advanced","flags":1,"default_value":"quay.io/ceph/ceph","min":"","max":"","enum_allowed":[],"desc":"Container image name, without the tag","long_desc":"","tags":[],"see_also":[]},"container_image_grafana":{"name":"container_image_grafana","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/ceph-grafana:8.3.5","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_haproxy":{"name":"container_image_haproxy","type":"str","level":"advanced","flags":0,"default_value":"docker.io/library/haproxy:2.3","min":"","max":"","enum_allowed":[],"desc":"HAproxy container image","long_desc":"","tags":[],"see_also":[]},"container_image_keepalived":{"name":"container_image_keepalived","type":"str","level":"advanced","flags":0,"default_value":"docker.io/arcts/keepalived","min":"","max":"","enum_allowed":[],"desc":"Keepalived container image","long_desc":"","tags":[],"see_also":[]},"container_image_node_exporter":{"name":"container_image_node_exporter","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/node-exporter:v1.3.1","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_prometheus":{"name":"container_image_prometheus","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/prometheus:v2.33.4","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_snmp_gateway":{"name":"container_image_snmp_gateway","type":"str","level":"advanced","flags":0,"default_value":"docker.io/maxwo/snmp-notifier:v1.2.1","min":"","max":"","enum_allowed":[],"desc":"SNMP Gateway container image","long_desc":"","tags":[],"see_also":[]},"container_init":{"name":"container_init","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Run podman/docker with 
`--init`","long_desc":"","tags":[],"see_also":[]},"daemon_cache_timeout":{"name":"daemon_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"seconds to cache service (daemon) inventory","long_desc":"","tags":[],"see_also":[]},"default_registry":{"name":"default_registry","type":"str","level":"advanced","flags":0,"default_value":"docker.io","min":"","max":"","enum_allowed":[],"desc":"Search-registry to which we should normalize unqualified image names. This is not the default registry","long_desc":"","tags":[],"see_also":[]},"device_cache_timeout":{"name":"device_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"1800","min":"","max":"","enum_allowed":[],"desc":"seconds to cache device inventory","long_desc":"","tags":[],"see_also":[]},"device_enhanced_scan":{"name":"device_enhanced_scan","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use libstoragemgmt during device scans","long_desc":"","tags":[],"see_also":[]},"facts_cache_timeout":{"name":"facts_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"seconds to cache host facts data","long_desc":"","tags":[],"see_also":[]},"host_check_interval":{"name":"host_check_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to perform a host check","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"log to the \"cephadm\" cluster log channel\"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf":{"name":"manage_etc_ceph_ceph_conf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Manage and own /etc/ceph/ceph.conf on the hosts.","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf_hosts":{"name":"manage_etc_ceph_ceph_conf_hosts","type":"str","level":"advanced","flags":0,"default_value":"*","min":"","max":"","enum_allowed":[],"desc":"PlacementSpec describing on which hosts to manage /etc/ceph/ceph.conf","long_desc":"","tags":[],"see_also":[]},"max_count_per_host":{"name":"max_count_per_host","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of daemons per service per host","long_desc":"","tags":[],"see_also":[]},"max_osd_draining_count":{"name":"max_osd_draining_count","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of osds that will be drained simultaneously when osds are 
removed","long_desc":"","tags":[],"see_also":[]},"migration_current":{"name":"migration_current","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"internal - do not modify","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":0,"default_value":"root","min":"","max":"","enum_allowed":["cephadm-package","root"],"desc":"mode for remote execution of cephadm","long_desc":"","tags":[],"see_also":[]},"prometheus_alerts_path":{"name":"prometheus_alerts_path","type":"str","level":"advanced","flags":0,"default_value":"/etc/prometheus/ceph/ceph_default_alerts.yml","min":"","max":"","enum_allowed":[],"desc":"location of alerts to include in prometheus deployments","long_desc":"","tags":[],"see_also":[]},"registry_insecure":{"name":"registry_insecure","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Registry is to be considered insecure (no TLS available). Only for development purposes.","long_desc":"","tags":[],"see_also":[]},"registry_password":{"name":"registry_password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository password. Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"registry_url":{"name":"registry_url","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Registry url for login purposes. This is not the default registry","long_desc":"","tags":[],"see_also":[]},"registry_username":{"name":"registry_username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository username. Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"ssh_config_file":{"name":"ssh_config_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"customized SSH config file to connect to managed hosts","long_desc":"","tags":[],"see_also":[]},"use_agent":{"name":"use_agent","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use cephadm agent on each host to gather and send metadata","long_desc":"","tags":[],"see_also":[]},"use_repo_digest":{"name":"use_repo_digest","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Automatically convert image tags to image digest. 
Make sure all daemons use the same image","long_desc":"","tags":[],"see_also":[]},"warn_on_failed_host_check":{"name":"warn_on_failed_host_check","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if the host check fails","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_daemons":{"name":"warn_on_stray_daemons","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected that are not managed by cephadm","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_hosts":{"name":"warn_on_stray_hosts","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected on a host that is not managed by cephadm","long_desc":"","tags":[],"see_also":[]}}},{"name":"crash","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"retain_interval":{"name":"retain_interval","type":"secs","level":"advanced","flags":1,"default_value":"31536000","min":"","max":"","enum_allowed":[],"desc":"how long to retain crashes before pruning them","long_desc":"","tags":[],"see_also":[]},"warn_recent_interval":{"name":"warn_recent_interval","type":"secs","level":"advanced","flags":1,"default_value":"1209600","min":"","max":"","enum_allowed":[],"desc":"time interval in which to warn about recent 
crashes","long_desc":"","tags":[],"see_also":[]}}},{"name":"dashboard","can_run":true,"error_string":"","module_options":{"ACCOUNT_LOCKOUT_ATTEMPTS":{"name":"ACCOUNT_LOCKOUT_ATTEMPTS","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_HOST":{"name":"ALERTMANAGER_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_SSL_VERIFY":{"name":"ALERTMANAGER_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_ENABLED":{"name":"AUDIT_API_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_LOG_PAYLOAD":{"name":"AUDIT_API_LOG_PAYLOAD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ENABLE_BROWSABLE_API":{"name":"ENABLE_BROWSABLE_API","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_CEPHFS":{"name":"FEATURE_TOGGLE_CEPHFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_ISCSI":{"name":"FEATURE_TOGGLE_ISCSI","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_MIRRORING":{"name":"FEATURE_TOGGLE_MIRRORING","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_NFS":{"name":"FEATURE_TOGGLE_NFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RBD":{"name":"FEATURE_TOGGLE_RBD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RGW":{"name":"FEATURE_TOGGLE_RGW","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE":{"name":"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_PASSWORD":{"name":"GRAFANA_API_PASSWORD","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_SSL_VERIFY":{"name":"GRAFANA_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_URL":{"name":"GRAFANA_API_URL","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_USERNAME":{"name":"GRAFANA_API_USERNAME","type":"str","level":"advanced","flags":0,"default_val
ue":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_FRONTEND_API_URL":{"name":"GRAFANA_FRONTEND_API_URL","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_UPDATE_DASHBOARDS":{"name":"GRAFANA_UPDATE_DASHBOARDS","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISCSI_API_SSL_VERIFICATION":{"name":"ISCSI_API_SSL_VERIFICATION","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISSUE_TRACKER_API_KEY":{"name":"ISSUE_TRACKER_API_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_HOST":{"name":"PROMETHEUS_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_SSL_VERIFY":{"name":"PROMETHEUS_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_COMPLEXITY_ENABLED":{"name":"PWD_POLICY_CHECK_COMPLEXITY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED":{"name":"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_LENGTH_ENABLED":{"name":"PWD_POLICY_CHECK_LENGTH_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_OLDPWD_ENABLED":{"name":"PWD_POLICY_CHECK_OLDPWD_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_USERNAME_ENABLED":{"name":"PWD_POLICY_CHECK_USERNAME_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_ENABLED":{"name":"PWD_POLICY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_EXCLUSION_LIST":{"name":"PWD_POLICY_EXCLUSION_LIST","type":"str","level":"advanced","flags":0,"default_value":"osd,host,dashboard,pool,block,nfs,ceph,monitors,gateway,logs,crush,maps","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_COMPLEXITY":{"name":"PWD_P
OLICY_MIN_COMPLEXITY","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_LENGTH":{"name":"PWD_POLICY_MIN_LENGTH","type":"int","level":"advanced","flags":0,"default_value":"8","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"REST_REQUESTS_TIMEOUT":{"name":"REST_REQUESTS_TIMEOUT","type":"int","level":"advanced","flags":0,"default_value":"45","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ACCESS_KEY":{"name":"RGW_API_ACCESS_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ADMIN_RESOURCE":{"name":"RGW_API_ADMIN_RESOURCE","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SECRET_KEY":{"name":"RGW_API_SECRET_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SSL_VERIFY":{"name":"RGW_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_SPAN":{"name":"USER_PWD_EXPIRATION_SPAN","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_1":{"name":"USER_PWD_EXPIRATION_WARNING_1","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_2":{"name":"USER_PWD_EXPIRATION_WARNING_2","type":"int","level":"advanced","flags":0,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"crt_file":{"name":"crt_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"debug":{"name":"debug","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable/disable debug 
options","long_desc":"","tags":[],"see_also":[]},"jwt_token_ttl":{"name":"jwt_token_ttl","type":"int","level":"advanced","flags":0,"default_value":"28800","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"motd":{"name":"motd","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"The message of the day","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":0,"default_value":"8080","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl_server_port":{"name":"ssl_server_port","type":"int","level":"advanced","flags":0,"default_value":"8443","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":0,"default_value":"redirect","min":"","max":"","enum_allowed":["error","redirect"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":0,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url_prefix":{"name":"url_prefix","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"devicehealth","can_run":true,"error_string":"","module_options":{"enable_monitoring":{"name":"enable_monitoring","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"monitor device health 
metrics","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mark_out_threshold":{"name":"mark_out_threshold","type":"secs","level":"advanced","flags":1,"default_value":"2419200","min":"","max":"","enum_allowed":[],"desc":"automatically mark OSD if it may fail before this long","long_desc":"","tags":[],"see_also":[]},"pool_name":{"name":"pool_name","type":"str","level":"advanced","flags":1,"default_value":"device_health_metrics","min":"","max":"","enum_allowed":[],"desc":"name of pool in which to store device health metrics","long_desc":"","tags":[],"see_also":[]},"retention_period":{"name":"retention_period","type":"secs","level":"advanced","flags":1,"default_value":"15552000","min":"","max":"","enum_allowed":[],"desc":"how long to retain device health metrics","long_desc":"","tags":[],"see_also":[]},"scrape_frequency":{"name":"scrape_frequency","type":"secs","level":"advanced","flags":1,"default_value":"86400","min":"","max":"","enum_allowed":[],"desc":"how frequently to scrape device health metrics","long_desc":"","tags":[],"see_also":[]},"self_heal":{"name":"self_heal","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"preemptively heal cluster around devices that may fail","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and check device health","long_desc":"","tags":[],"see_also":[]},"warn_threshold":{"name":"warn_threshold","type":"secs","level":"advanced","flags":1,"default_value":"7257600","min":"","max":"","enum_allowed":[],"desc":"raise health warning if OSD may fail before this 
long","long_desc":"","tags":[],"see_also":[]}}},{"name":"diskprediction_local","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"predict_interval":{"name":"predict_interval","type":"str","level":"advanced","flags":0,"default_value":"86400","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"predictor_model":{"name":"predictor_model","type":"str","level":"advanced","flags":0,"default_value":"prophetstor","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"str","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"influx","can_run":false,"error_string":"influxdb python module not found","module_options":{"batch_size":{"name":"batch_size","type":"int","level":"advanced","flags":0,"default_value":"5000","min":"","max":"","enum_allowed":[],"desc":"How big batches of data points should be when sending to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"database":{"name":"database","type":"str","level":"advanced","flags":0,"default_value":"ceph","min":"","max":"","enum_allowed":[],"desc":"InfluxDB database name. You will need to create this database and grant write privileges to the configured username or the username must have admin privileges to create it.","long_desc":"","tags":[],"see_also":[]},"hostname":{"name":"hostname","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server hostname","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"30","min":"5","max":"","enum_allowed":[],"desc":"Time between reports to InfluxDB. 
Default 30 seconds.","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"password":{"name":"password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"password of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"port":{"name":"port","type":"int","level":"advanced","flags":0,"default_value":"8086","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server port","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"str","level":"advanced","flags":0,"default_value":"false","min":"","max":"","enum_allowed":[],"desc":"Use https connection for InfluxDB server. Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]},"threads":{"name":"threads","type":"int","level":"advanced","flags":0,"default_value":"5","min":"1","max":"32","enum_allowed":[],"desc":"How many worker threads should be spawned for sending data to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"username":{"name":"username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"username of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"verify_ssl":{"name":"verify_ssl","type":"str","level":"advanced","flags":0,"default_value":"true","min":"","max":"","enum_allowed":[],"desc":"Verify https cert for InfluxDB server. 
Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]}}},{"name":"insights","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"iostat","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"k8sevents","can_run":true,"error_string":"","module_options":{"ceph_event_retention_days":{"name":"ceph_event_retention_days","type":"int","level":"advanced","flags":0,"default_value":"7","min":"","max":"","enum_allowed":[],"desc":"Days to hold ceph event information within local cache","long_desc":"","tags":[],"see_also":[]},"config_check_secs":{"name":"config_check_secs","type":"int","level":"advanced","flags":0,"default_value":"10","min":"10","max":"","enum_allowed":[],"desc":"interval (secs) to check for cluster configuration 
changes","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"localpool","can_run":true,"error_string":"","module_options":{"failure_domain":{"name":"failure_domain","type":"str","level":"advanced","flags":1,"default_value":"host","min":"","max":"","enum_allowed":[],"desc":"failure domain for any created local pool","long_desc":"what failure domain we should separate data replicas across.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_size":{"name":"min_size","type":"int","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"default min_size for any created local pool","long_desc":"value to set min_size to (unchanged from Ceph's default if this option is not set)","tags":[],"see_also":[]},"num_rep":{"name":"num_rep","type":"int","level":"advanced","flags":1,"default_value":"3","min":"","max":"","enum_allowed":[],"desc":"default replica count for any created local pool","long_desc":"","tags":[],"see_also":[]},"pg_num":{"name":"pg_num","type":"int","level":"advanced","flags":1,"default_value":"128","min":"","max":"","enum_allowed":[],"desc":"default pg_num for any created local pool","long_desc":"","tags":[],"see_also":[]},"prefix":{"name":"prefix","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"name prefix for any created local pool","long_desc":"","tags":[],"see_also":[]},"subtree":{"name":"subtree","type":"str","level":"advanced","flags":1,"default_value":"rack","min":"","max":"","enum_allowed":[],"desc":"CRUSH level for which to create a local pool","long_desc":"which CRUSH subtree type the module should create a pool 
for.","tags":[],"see_also":[]}}},{"name":"mds_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"mirroring","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"nfs","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"orchestrator","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tag
s":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"orchestrator":{"name":"orchestrator","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["cephadm","rook","test_orchestrator"],"desc":"Orchestrator backend","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_perf_query","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"pg_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"noautoscale":{"name":"noautoscale","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"global autoscale flag","long_desc":"Option to turn on/off the autoscaler for all 
pools","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"threshold":{"name":"threshold","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"1.0","max":"","enum_allowed":[],"desc":"scaling threshold","long_desc":"The factor by which the `NEW PG_NUM` must vary from the current`PG_NUM` before being accepted. Cannot be less than 1.0","tags":[],"see_also":[]}}},{"name":"progress","can_run":true,"error_string":"","module_options":{"allow_pg_recovery_event":{"name":"allow_pg_recovery_event","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow the module to show pg recovery progress","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_completed_events":{"name":"max_completed_events","type":"int","level":"advanced","flags":1,"default_value":"50","min":"","max":"","enum_allowed":[],"desc":"number of past completed events to remember","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"how long the module is going to 
sleep","long_desc":"","tags":[],"see_also":[]}}},{"name":"prometheus","can_run":true,"error_string":"","module_options":{"cache":{"name":"cache","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools":{"name":"rbd_stats_pools","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools_refresh_interval":{"name":"rbd_stats_pools_refresh_interval","type":"int","level":"advanced","flags":0,"default_value":"300","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"scrape_interval":{"name":"scrape_interval","type":"float","level":"advanced","flags":0,"default_value":"15.0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"the IPv4 or IPv6 address on which the module listens for HTTP requests","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":0,"default_value":"9283","min":"","max":"","enum_allowed":[],"desc":"the port on which the module listens for HTTP 
requests","long_desc":"","tags":[],"see_also":[]},"stale_cache_strategy":{"name":"stale_cache_strategy","type":"str","level":"advanced","flags":0,"default_value":"log","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":1,"default_value":"default","min":"","max":"","enum_allowed":["default","error"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":1,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rbd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_snap_create":{"name":"max_concurrent_snap_create","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mirror_snapshot_schedule":{"name":"mirror_snapshot_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"trash_purge_schedule":{"name":"trash_purge_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"restful","can_run":true,"error_string":"","module_options":{"enable_auth":{"name":"enable_auth","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"serve
r_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rook","can_run":true,"error_string":"","module_options":{"drive_group_interval":{"name":"drive_group_interval","type":"float","level":"advanced","flags":0,"default_value":"300.0","min":"","max":"","enum_allowed":[],"desc":"interval in seconds between re-application of applied drive_groups","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"storage_class":{"name":"storage_class","type":"str","level":"advanced","flags":0,"default_value":"local","min":"","max":"","enum_allowed":[],"desc":"storage class name for LSO-discovered PVs","long_desc":"","tags":[],"see_also":[]}}},{"name":"selftest","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption1":{"name":"roption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption2":{"name":"roption2","type":"str","level":"advanced","flags":0,"default_value":"xyz","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption1":{"name":"rwoption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption2":{"name":"rwoption2","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption3":{"name":"rwoption3","type":"float","level":"advanced","flags":0,"default_value":"","min":"","max":"",
"enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption4":{"name":"rwoption4","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption5":{"name":"rwoption5","type":"bool","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption6":{"name":"rwoption6","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption7":{"name":"rwoption7","type":"int","level":"advanced","flags":0,"default_value":"","min":"1","max":"42","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testkey":{"name":"testkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testlkey":{"name":"testlkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testnewline":{"name":"testnewline","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"snap_schedule","can_run":true,"error_string":"","module_options":{"allow_m_granularity":{"name":"allow_m_granularity","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow minute scheduled snapshots","long_desc":"","tags":[],"see_also":[]},"dump_on_update":{"name":"dump_on_update","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"dump database to debug log on 
update","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"stats","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"status","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telegraf","can_run":true,"error_string":"","module_options":{"address":{"name":"address","type":"str","level":"advanced","flags":0,"default_value":"unixgram:///tmp/telegraf.sock","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"15","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False
","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telemetry","can_run":true,"error_string":"","module_options":{"channel_basic":{"name":"channel_basic","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share basic cluster information (size, version)","long_desc":"","tags":[],"see_also":[]},"channel_crash":{"name":"channel_crash","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share metadata about Ceph daemon crashes (version, stack straces, etc)","long_desc":"","tags":[],"see_also":[]},"channel_device":{"name":"channel_device","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share device health metrics (e.g., SMART data, minus potentially identifying info like serial numbers)","long_desc":"","tags":[],"see_also":[]},"channel_ident":{"name":"channel_ident","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share a user-provided description and/or contact email for the cluster","long_desc":"","tags":[],"see_also":[]},"channel_perf":{"name":"channel_perf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share various performance metrics of a 
cluster","long_desc":"","tags":[],"see_also":[]},"contact":{"name":"contact","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"description":{"name":"description","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"device_url":{"name":"device_url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/device","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"int","level":"advanced","flags":0,"default_value":"24","min":"8","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"last_opt_revision":{"name":"last_opt_revision","type":"int","level":"advanced","flags":0,"default_value":"1","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"leaderboard":{"name":"leaderboard","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"organization":{"name":"organization","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"proxy":{"name":"proxy","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url":{"name":"url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/report","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"test_orchestrator","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name
":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"volumes","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_clones":{"name":"max_concurrent_clones","type":"int","level":"advanced","flags":0,"default_value":"4","min":"","max":"","enum_allowed":[],"desc":"Number of asynchronous cloner threads","long_desc":"","tags":[],"see_also":[]},"snapshot_clone_delay":{"name":"snapshot_clone_delay","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"Delay clone begin operation by snapshot_clone_delay seconds","long_desc":"","tags":[],"see_also":[]}}},{"name":"zabbix","can_run":true,"error_string":"","module_options":{"discovery_interval":{"name":"discovery_interval","type":"uint","level":"advanced","flags":0,"default_value":"100","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"identifier":{"name":"identifier","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_host":{"name":"zabbix_host","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_port":{"name":"zabbix_port","type":"int","level":"advanced","flags":0,"default_value":"10051","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_sender":{"name":"zabbix_sender","type":"str","level":"advanced","flags":0,"default
_value":"/usr/bin/zabbix_sender","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}}]}],"modules":["cephadm","dashboard","iostat","nfs","prometheus","restful"],"available_modules":[{"name":"alerts","can_run":true,"error_string":"","module_options":{"interval":{"name":"interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"How frequently to reexamine health status","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"smtp_destination":{"name":"smtp_destination","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Email address to send alerts to","long_desc":"","tags":[],"see_also":[]},"smtp_from_name":{"name":"smtp_from_name","type":"str","level":"advanced","flags":1,"default_value":"Ceph","min":"","max":"","enum_allowed":[],"desc":"Email From: name","long_desc":"","tags":[],"see_also":[]},"smtp_host":{"name":"smtp_host","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_password":{"name":"smtp_password","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Password to authenticate with","long_desc":"","tags":[],"see_also":[]},"smtp_port":{"name":"smtp_port","type":"int","level":"advanced","flags":1,"default_value":"465","min":"","max":"","enum_allowed":[],"desc":"SMTP port","long_desc":"","tags":[],"see_also":[]},"smtp_sender":{"name":"smtp_sender","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP envelope sender","long_desc":"","tags":[],"see_also":[]},"smtp_ssl":{"name":"smtp_ssl","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Use SSL to connect to SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_user":{"name":"smtp_user","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"User to authenticate as","long_desc":"","tags":[],"see_also":[]}}},{"name":"balancer","can_run":true,"error_string":"","module_options":{"active":{"name":"active","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"automatically balance PGs across cluster","long_desc":"","tags":[],"see_also":[]},"begin_time":{"name":"begin_time","type":"str","level":"advanced","flags":1,"default_value":"0000","min":"","max":"","enum_allowed":[],"desc":"beginning time of day to automatically balance","long_desc":"This is a time of day in the format 
HHMM.","tags":[],"see_also":[]},"begin_weekday":{"name":"begin_weekday","type":"uint","level":"advanced","flags":1,"default_value":"0","min":"0","max":"7","enum_allowed":[],"desc":"Restrict automatic balancing to this day of the week or later","long_desc":"0 or 7 = Sunday, 1 = Monday, etc.","tags":[],"see_also":[]},"crush_compat_max_iterations":{"name":"crush_compat_max_iterations","type":"uint","level":"advanced","flags":1,"default_value":"25","min":"1","max":"250","enum_allowed":[],"desc":"maximum number of iterations to attempt optimization","long_desc":"","tags":[],"see_also":[]},"crush_compat_metrics":{"name":"crush_compat_metrics","type":"str","level":"advanced","flags":1,"default_value":"pgs,objects,bytes","min":"","max":"","enum_allowed":[],"desc":"metrics with which to calculate OSD utilization","long_desc":"Value is a list of one or more of \"pgs\", \"objects\", or \"bytes\", and indicates which metrics to use to balance utilization.","tags":[],"see_also":[]},"crush_compat_step":{"name":"crush_compat_step","type":"float","level":"advanced","flags":1,"default_value":"0.5","min":"0.001","max":"0.999","enum_allowed":[],"desc":"aggressiveness of optimization","long_desc":".99 is very aggressive, .01 is less aggressive","tags":[],"see_also":[]},"end_time":{"name":"end_time","type":"str","level":"advanced","flags":1,"default_value":"2400","min":"","max":"","enum_allowed":[],"desc":"ending time of day to automatically balance","long_desc":"This is a time of day in the format HHMM.","tags":[],"see_also":[]},"end_weekday":{"name":"end_weekday","type":"uint","level":"advanced","flags":1,"default_value":"7","min":"0","max":"7","enum_allowed":[],"desc":"Restrict automatic balancing to days of the week earlier than this","long_desc":"0 or 7 = Sunday, 1 = Monday, etc.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_score":{"name":"min_score","type":"float","level":"advanced","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"minimum score, below which no optimization is attempted","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":1,"default_value":"upmap","min":"","max":"","enum_allowed":["crush-compat","none","upmap"],"desc":"Balancer mode","long_desc":"","tags":[],"see_also":[]},"pool_ids":{"name":"pool_ids","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"pools which the automatic balancing will be limited to","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and attempt 
optimization","long_desc":"","tags":[],"see_also":[]},"upmap_max_deviation":{"name":"upmap_max_deviation","type":"int","level":"advanced","flags":1,"default_value":"5","min":"1","max":"","enum_allowed":[],"desc":"deviation below which no optimization is attempted","long_desc":"If the number of PGs are within this count then no optimization is attempted","tags":[],"see_also":[]},"upmap_max_optimizations":{"name":"upmap_max_optimizations","type":"uint","level":"advanced","flags":1,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"maximum upmap optimizations to make per attempt","long_desc":"","tags":[],"see_also":[]}}},{"name":"cephadm","can_run":true,"error_string":"","module_options":{"agent_down_multiplier":{"name":"agent_down_multiplier","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"","max":"","enum_allowed":[],"desc":"Multiplied by agent refresh rate to calculate how long agent must not report before being marked down","long_desc":"","tags":[],"see_also":[]},"agent_refresh_rate":{"name":"agent_refresh_rate","type":"secs","level":"advanced","flags":0,"default_value":"20","min":"","max":"","enum_allowed":[],"desc":"How often agent on each host will try to gather and send metadata","long_desc":"","tags":[],"see_also":[]},"agent_starting_port":{"name":"agent_starting_port","type":"int","level":"advanced","flags":0,"default_value":"4721","min":"","max":"","enum_allowed":[],"desc":"First port agent will try to bind to (will also try up to next 1000 subsequent ports if blocked)","long_desc":"","tags":[],"see_also":[]},"allow_ptrace":{"name":"allow_ptrace","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow SYS_PTRACE capability on ceph containers","long_desc":"The SYS_PTRACE capability is needed to attach to a process with gdb or strace. 
Enabling this options can allow debugging daemons that encounter problems at runtime.","tags":[],"see_also":[]},"autotune_interval":{"name":"autotune_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to autotune daemon memory","long_desc":"","tags":[],"see_also":[]},"autotune_memory_target_ratio":{"name":"autotune_memory_target_ratio","type":"float","level":"advanced","flags":0,"default_value":"0.7","min":"","max":"","enum_allowed":[],"desc":"ratio of total system memory to divide amongst autotuned daemons","long_desc":"","tags":[],"see_also":[]},"config_checks_enabled":{"name":"config_checks_enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable or disable the cephadm configuration analysis","long_desc":"","tags":[],"see_also":[]},"config_dashboard":{"name":"config_dashboard","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"manage configs like API endpoints in Dashboard.","long_desc":"","tags":[],"see_also":[]},"container_image_alertmanager":{"name":"container_image_alertmanager","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/alertmanager:v0.23.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_base":{"name":"container_image_base","type":"str","level":"advanced","flags":1,"default_value":"quay.io/ceph/ceph","min":"","max":"","enum_allowed":[],"desc":"Container image name, without the tag","long_desc":"","tags":[],"see_also":[]},"container_image_grafana":{"name":"container_image_grafana","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/ceph-grafana:8.3.5","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_haproxy":{"name":"container_image_haproxy","type":"str","level":"advanced","flags":0,"default_value":"docker.io/library/haproxy:2.3","min":"","max":"","enum_allowed":[],"desc":"HAproxy container image","long_desc":"","tags":[],"see_also":[]},"container_image_keepalived":{"name":"container_image_keepalived","type":"str","level":"advanced","flags":0,"default_value":"docker.io/arcts/keepalived","min":"","max":"","enum_allowed":[],"desc":"Keepalived container image","long_desc":"","tags":[],"see_also":[]},"container_image_node_exporter":{"name":"container_image_node_exporter","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/node-exporter:v1.3.1","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_prometheus":{"name":"container_image_prometheus","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/prometheus:v2.33.4","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_snmp_gateway":{"name":"container_image_snmp_gateway","type":"str","level":"advanced","flags":0,"default_value":"docker.io/maxwo/snmp-notifier:v1.2.1","min":"","max":"","enum_allowed":[],"desc":"SNMP Gateway container image","long_desc":"","tags":[],"see_also":[]},"container_init":{"name":"container_init","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Run podman/docker with 
`--init`","long_desc":"","tags":[],"see_also":[]},"daemon_cache_timeout":{"name":"daemon_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"seconds to cache service (daemon) inventory","long_desc":"","tags":[],"see_also":[]},"default_registry":{"name":"default_registry","type":"str","level":"advanced","flags":0,"default_value":"docker.io","min":"","max":"","enum_allowed":[],"desc":"Search-registry to which we should normalize unqualified image names. This is not the default registry","long_desc":"","tags":[],"see_also":[]},"device_cache_timeout":{"name":"device_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"1800","min":"","max":"","enum_allowed":[],"desc":"seconds to cache device inventory","long_desc":"","tags":[],"see_also":[]},"device_enhanced_scan":{"name":"device_enhanced_scan","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use libstoragemgmt during device scans","long_desc":"","tags":[],"see_also":[]},"facts_cache_timeout":{"name":"facts_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"seconds to cache host facts data","long_desc":"","tags":[],"see_also":[]},"host_check_interval":{"name":"host_check_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to perform a host check","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"log to the \"cephadm\" cluster log channel\"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf":{"name":"manage_etc_ceph_ceph_conf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Manage and own /etc/ceph/ceph.conf on the hosts.","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf_hosts":{"name":"manage_etc_ceph_ceph_conf_hosts","type":"str","level":"advanced","flags":0,"default_value":"*","min":"","max":"","enum_allowed":[],"desc":"PlacementSpec describing on which hosts to manage /etc/ceph/ceph.conf","long_desc":"","tags":[],"see_also":[]},"max_count_per_host":{"name":"max_count_per_host","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of daemons per service per host","long_desc":"","tags":[],"see_also":[]},"max_osd_draining_count":{"name":"max_osd_draining_count","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of osds that will be drained simultaneously when osds are 
removed","long_desc":"","tags":[],"see_also":[]},"migration_current":{"name":"migration_current","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"internal - do not modify","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":0,"default_value":"root","min":"","max":"","enum_allowed":["cephadm-package","root"],"desc":"mode for remote execution of cephadm","long_desc":"","tags":[],"see_also":[]},"prometheus_alerts_path":{"name":"prometheus_alerts_path","type":"str","level":"advanced","flags":0,"default_value":"/etc/prometheus/ceph/ceph_default_alerts.yml","min":"","max":"","enum_allowed":[],"desc":"location of alerts to include in prometheus deployments","long_desc":"","tags":[],"see_also":[]},"registry_insecure":{"name":"registry_insecure","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Registry is to be considered insecure (no TLS available). Only for development purposes.","long_desc":"","tags":[],"see_also":[]},"registry_password":{"name":"registry_password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository password. Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"registry_url":{"name":"registry_url","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Registry url for login purposes. This is not the default registry","long_desc":"","tags":[],"see_also":[]},"registry_username":{"name":"registry_username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository username. Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"ssh_config_file":{"name":"ssh_config_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"customized SSH config file to connect to managed hosts","long_desc":"","tags":[],"see_also":[]},"use_agent":{"name":"use_agent","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use cephadm agent on each host to gather and send metadata","long_desc":"","tags":[],"see_also":[]},"use_repo_digest":{"name":"use_repo_digest","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Automatically convert image tags to image digest. 
Make sure all daemons use the same image","long_desc":"","tags":[],"see_also":[]},"warn_on_failed_host_check":{"name":"warn_on_failed_host_check","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if the host check fails","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_daemons":{"name":"warn_on_stray_daemons","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected that are not managed by cephadm","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_hosts":{"name":"warn_on_stray_hosts","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected on a host that is not managed by cephadm","long_desc":"","tags":[],"see_also":[]}}},{"name":"crash","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"retain_interval":{"name":"retain_interval","type":"secs","level":"advanced","flags":1,"default_value":"31536000","min":"","max":"","enum_allowed":[],"desc":"how long to retain crashes before pruning them","long_desc":"","tags":[],"see_also":[]},"warn_recent_interval":{"name":"warn_recent_interval","type":"secs","level":"advanced","flags":1,"default_value":"1209600","min":"","max":"","enum_allowed":[],"desc":"time interval in which to warn about recent 
crashes","long_desc":"","tags":[],"see_also":[]}}},{"name":"dashboard","can_run":true,"error_string":"","module_options":{"ACCOUNT_LOCKOUT_ATTEMPTS":{"name":"ACCOUNT_LOCKOUT_ATTEMPTS","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_HOST":{"name":"ALERTMANAGER_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_SSL_VERIFY":{"name":"ALERTMANAGER_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_ENABLED":{"name":"AUDIT_API_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_LOG_PAYLOAD":{"name":"AUDIT_API_LOG_PAYLOAD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ENABLE_BROWSABLE_API":{"name":"ENABLE_BROWSABLE_API","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_CEPHFS":{"name":"FEATURE_TOGGLE_CEPHFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_ISCSI":{"name":"FEATURE_TOGGLE_ISCSI","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_MIRRORING":{"name":"FEATURE_TOGGLE_MIRRORING","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_NFS":{"name":"FEATURE_TOGGLE_NFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RBD":{"name":"FEATURE_TOGGLE_RBD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RGW":{"name":"FEATURE_TOGGLE_RGW","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE":{"name":"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_PASSWORD":{"name":"GRAFANA_API_PASSWORD","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_SSL_VERIFY":{"name":"GRAFANA_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_URL":{"name":"GRAFANA_API_URL","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_USERNAME":{"name":"GRAFANA_API_USERNAME","type":"str","level":"advanced","flags":0,"default_val
ue":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_FRONTEND_API_URL":{"name":"GRAFANA_FRONTEND_API_URL","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_UPDATE_DASHBOARDS":{"name":"GRAFANA_UPDATE_DASHBOARDS","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISCSI_API_SSL_VERIFICATION":{"name":"ISCSI_API_SSL_VERIFICATION","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISSUE_TRACKER_API_KEY":{"name":"ISSUE_TRACKER_API_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_HOST":{"name":"PROMETHEUS_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_SSL_VERIFY":{"name":"PROMETHEUS_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_COMPLEXITY_ENABLED":{"name":"PWD_POLICY_CHECK_COMPLEXITY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED":{"name":"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_LENGTH_ENABLED":{"name":"PWD_POLICY_CHECK_LENGTH_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_OLDPWD_ENABLED":{"name":"PWD_POLICY_CHECK_OLDPWD_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_USERNAME_ENABLED":{"name":"PWD_POLICY_CHECK_USERNAME_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_ENABLED":{"name":"PWD_POLICY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_EXCLUSION_LIST":{"name":"PWD_POLICY_EXCLUSION_LIST","type":"str","level":"advanced","flags":0,"default_value":"osd,host,dashboard,pool,block,nfs,ceph,monitors,gateway,logs,crush,maps","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_COMPLEXITY":{"name":"PWD_P
OLICY_MIN_COMPLEXITY","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_LENGTH":{"name":"PWD_POLICY_MIN_LENGTH","type":"int","level":"advanced","flags":0,"default_value":"8","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"REST_REQUESTS_TIMEOUT":{"name":"REST_REQUESTS_TIMEOUT","type":"int","level":"advanced","flags":0,"default_value":"45","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ACCESS_KEY":{"name":"RGW_API_ACCESS_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ADMIN_RESOURCE":{"name":"RGW_API_ADMIN_RESOURCE","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SECRET_KEY":{"name":"RGW_API_SECRET_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SSL_VERIFY":{"name":"RGW_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_SPAN":{"name":"USER_PWD_EXPIRATION_SPAN","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_1":{"name":"USER_PWD_EXPIRATION_WARNING_1","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_2":{"name":"USER_PWD_EXPIRATION_WARNING_2","type":"int","level":"advanced","flags":0,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"crt_file":{"name":"crt_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"debug":{"name":"debug","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable/disable debug 
options","long_desc":"","tags":[],"see_also":[]},"jwt_token_ttl":{"name":"jwt_token_ttl","type":"int","level":"advanced","flags":0,"default_value":"28800","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"motd":{"name":"motd","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"The message of the day","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":0,"default_value":"8080","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl_server_port":{"name":"ssl_server_port","type":"int","level":"advanced","flags":0,"default_value":"8443","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":0,"default_value":"redirect","min":"","max":"","enum_allowed":["error","redirect"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":0,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url_prefix":{"name":"url_prefix","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"devicehealth","can_run":true,"error_string":"","module_options":{"enable_monitoring":{"name":"enable_monitoring","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"monitor device health 
metrics","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mark_out_threshold":{"name":"mark_out_threshold","type":"secs","level":"advanced","flags":1,"default_value":"2419200","min":"","max":"","enum_allowed":[],"desc":"automatically mark OSD if it may fail before this long","long_desc":"","tags":[],"see_also":[]},"pool_name":{"name":"pool_name","type":"str","level":"advanced","flags":1,"default_value":"device_health_metrics","min":"","max":"","enum_allowed":[],"desc":"name of pool in which to store device health metrics","long_desc":"","tags":[],"see_also":[]},"retention_period":{"name":"retention_period","type":"secs","level":"advanced","flags":1,"default_value":"15552000","min":"","max":"","enum_allowed":[],"desc":"how long to retain device health metrics","long_desc":"","tags":[],"see_also":[]},"scrape_frequency":{"name":"scrape_frequency","type":"secs","level":"advanced","flags":1,"default_value":"86400","min":"","max":"","enum_allowed":[],"desc":"how frequently to scrape device health metrics","long_desc":"","tags":[],"see_also":[]},"self_heal":{"name":"self_heal","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"preemptively heal cluster around devices that may fail","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and check device health","long_desc":"","tags":[],"see_also":[]},"warn_threshold":{"name":"warn_threshold","type":"secs","level":"advanced","flags":1,"default_value":"7257600","min":"","max":"","enum_allowed":[],"desc":"raise health warning if OSD may fail before this 
long","long_desc":"","tags":[],"see_also":[]}}},{"name":"diskprediction_local","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"predict_interval":{"name":"predict_interval","type":"str","level":"advanced","flags":0,"default_value":"86400","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"predictor_model":{"name":"predictor_model","type":"str","level":"advanced","flags":0,"default_value":"prophetstor","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"str","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"influx","can_run":false,"error_string":"influxdb python module not found","module_options":{"batch_size":{"name":"batch_size","type":"int","level":"advanced","flags":0,"default_value":"5000","min":"","max":"","enum_allowed":[],"desc":"How big batches of data points should be when sending to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"database":{"name":"database","type":"str","level":"advanced","flags":0,"default_value":"ceph","min":"","max":"","enum_allowed":[],"desc":"InfluxDB database name. You will need to create this database and grant write privileges to the configured username or the username must have admin privileges to create it.","long_desc":"","tags":[],"see_also":[]},"hostname":{"name":"hostname","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server hostname","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"30","min":"5","max":"","enum_allowed":[],"desc":"Time between reports to InfluxDB. 
Default 30 seconds.","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"password":{"name":"password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"password of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"port":{"name":"port","type":"int","level":"advanced","flags":0,"default_value":"8086","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server port","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"str","level":"advanced","flags":0,"default_value":"false","min":"","max":"","enum_allowed":[],"desc":"Use https connection for InfluxDB server. Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]},"threads":{"name":"threads","type":"int","level":"advanced","flags":0,"default_value":"5","min":"1","max":"32","enum_allowed":[],"desc":"How many worker threads should be spawned for sending data to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"username":{"name":"username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"username of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"verify_ssl":{"name":"verify_ssl","type":"str","level":"advanced","flags":0,"default_value":"true","min":"","max":"","enum_allowed":[],"desc":"Verify https cert for InfluxDB server. 
Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]}}},{"name":"insights","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"iostat","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"k8sevents","can_run":true,"error_string":"","module_options":{"ceph_event_retention_days":{"name":"ceph_event_retention_days","type":"int","level":"advanced","flags":0,"default_value":"7","min":"","max":"","enum_allowed":[],"desc":"Days to hold ceph event information within local cache","long_desc":"","tags":[],"see_also":[]},"config_check_secs":{"name":"config_check_secs","type":"int","level":"advanced","flags":0,"default_value":"10","min":"10","max":"","enum_allowed":[],"desc":"interval (secs) to check for cluster configuration 
changes","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"localpool","can_run":true,"error_string":"","module_options":{"failure_domain":{"name":"failure_domain","type":"str","level":"advanced","flags":1,"default_value":"host","min":"","max":"","enum_allowed":[],"desc":"failure domain for any created local pool","long_desc":"what failure domain we should separate data replicas across.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_size":{"name":"min_size","type":"int","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"default min_size for any created local pool","long_desc":"value to set min_size to (unchanged from Ceph's default if this option is not set)","tags":[],"see_also":[]},"num_rep":{"name":"num_rep","type":"int","level":"advanced","flags":1,"default_value":"3","min":"","max":"","enum_allowed":[],"desc":"default replica count for any created local pool","long_desc":"","tags":[],"see_also":[]},"pg_num":{"name":"pg_num","type":"int","level":"advanced","flags":1,"default_value":"128","min":"","max":"","enum_allowed":[],"desc":"default pg_num for any created local pool","long_desc":"","tags":[],"see_also":[]},"prefix":{"name":"prefix","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"name prefix for any created local pool","long_desc":"","tags":[],"see_also":[]},"subtree":{"name":"subtree","type":"str","level":"advanced","flags":1,"default_value":"rack","min":"","max":"","enum_allowed":[],"desc":"CRUSH level for which to create a local pool","long_desc":"which CRUSH subtree type the module should create a pool 
for.","tags":[],"see_also":[]}}},{"name":"mds_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"mirroring","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"nfs","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"orchestrator","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tag
s":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"orchestrator":{"name":"orchestrator","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["cephadm","rook","test_orchestrator"],"desc":"Orchestrator backend","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_perf_query","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"pg_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"noautoscale":{"name":"noautoscale","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"global autoscale flag","long_desc":"Option to turn on/off the autoscaler for all 
pools","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"threshold":{"name":"threshold","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"1.0","max":"","enum_allowed":[],"desc":"scaling threshold","long_desc":"The factor by which the `NEW PG_NUM` must vary from the current`PG_NUM` before being accepted. Cannot be less than 1.0","tags":[],"see_also":[]}}},{"name":"progress","can_run":true,"error_string":"","module_options":{"allow_pg_recovery_event":{"name":"allow_pg_recovery_event","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow the module to show pg recovery progress","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_completed_events":{"name":"max_completed_events","type":"int","level":"advanced","flags":1,"default_value":"50","min":"","max":"","enum_allowed":[],"desc":"number of past completed events to remember","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"how long the module is going to 
sleep","long_desc":"","tags":[],"see_also":[]}}},{"name":"prometheus","can_run":true,"error_string":"","module_options":{"cache":{"name":"cache","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools":{"name":"rbd_stats_pools","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools_refresh_interval":{"name":"rbd_stats_pools_refresh_interval","type":"int","level":"advanced","flags":0,"default_value":"300","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"scrape_interval":{"name":"scrape_interval","type":"float","level":"advanced","flags":0,"default_value":"15.0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"the IPv4 or IPv6 address on which the module listens for HTTP requests","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":0,"default_value":"9283","min":"","max":"","enum_allowed":[],"desc":"the port on which the module listens for HTTP 
requests","long_desc":"","tags":[],"see_also":[]},"stale_cache_strategy":{"name":"stale_cache_strategy","type":"str","level":"advanced","flags":0,"default_value":"log","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":1,"default_value":"default","min":"","max":"","enum_allowed":["default","error"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":1,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rbd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_snap_create":{"name":"max_concurrent_snap_create","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mirror_snapshot_schedule":{"name":"mirror_snapshot_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"trash_purge_schedule":{"name":"trash_purge_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"restful","can_run":true,"error_string":"","module_options":{"enable_auth":{"name":"enable_auth","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"serve
r_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rook","can_run":true,"error_string":"","module_options":{"drive_group_interval":{"name":"drive_group_interval","type":"float","level":"advanced","flags":0,"default_value":"300.0","min":"","max":"","enum_allowed":[],"desc":"interval in seconds between re-application of applied drive_groups","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"storage_class":{"name":"storage_class","type":"str","level":"advanced","flags":0,"default_value":"local","min":"","max":"","enum_allowed":[],"desc":"storage class name for LSO-discovered PVs","long_desc":"","tags":[],"see_also":[]}}},{"name":"selftest","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption1":{"name":"roption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption2":{"name":"roption2","type":"str","level":"advanced","flags":0,"default_value":"xyz","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption1":{"name":"rwoption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption2":{"name":"rwoption2","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption3":{"name":"rwoption3","type":"float","level":"advanced","flags":0,"default_value":"","min":"","max":"",
"enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption4":{"name":"rwoption4","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption5":{"name":"rwoption5","type":"bool","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption6":{"name":"rwoption6","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption7":{"name":"rwoption7","type":"int","level":"advanced","flags":0,"default_value":"","min":"1","max":"42","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testkey":{"name":"testkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testlkey":{"name":"testlkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testnewline":{"name":"testnewline","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"snap_schedule","can_run":true,"error_string":"","module_options":{"allow_m_granularity":{"name":"allow_m_granularity","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow minute scheduled snapshots","long_desc":"","tags":[],"see_also":[]},"dump_on_update":{"name":"dump_on_update","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"dump database to debug log on 
update","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"stats","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"status","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telegraf","can_run":true,"error_string":"","module_options":{"address":{"name":"address","type":"str","level":"advanced","flags":0,"default_value":"unixgram:///tmp/telegraf.sock","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"15","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False
","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telemetry","can_run":true,"error_string":"","module_options":{"channel_basic":{"name":"channel_basic","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share basic cluster information (size, version)","long_desc":"","tags":[],"see_also":[]},"channel_crash":{"name":"channel_crash","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share metadata about Ceph daemon crashes (version, stack straces, etc)","long_desc":"","tags":[],"see_also":[]},"channel_device":{"name":"channel_device","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share device health metrics (e.g., SMART data, minus potentially identifying info like serial numbers)","long_desc":"","tags":[],"see_also":[]},"channel_ident":{"name":"channel_ident","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share a user-provided description and/or contact email for the cluster","long_desc":"","tags":[],"see_also":[]},"channel_perf":{"name":"channel_perf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share various performance metrics of a 
cluster","long_desc":"","tags":[],"see_also":[]},"contact":{"name":"contact","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"description":{"name":"description","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"device_url":{"name":"device_url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/device","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"int","level":"advanced","flags":0,"default_value":"24","min":"8","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"last_opt_revision":{"name":"last_opt_revision","type":"int","level":"advanced","flags":0,"default_value":"1","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"leaderboard":{"name":"leaderboard","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"organization":{"name":"organization","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"proxy":{"name":"proxy","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url":{"name":"url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/report","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"test_orchestrator","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name
":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"volumes","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_clones":{"name":"max_concurrent_clones","type":"int","level":"advanced","flags":0,"default_value":"4","min":"","max":"","enum_allowed":[],"desc":"Number of asynchronous cloner threads","long_desc":"","tags":[],"see_also":[]},"snapshot_clone_delay":{"name":"snapshot_clone_delay","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"Delay clone begin operation by snapshot_clone_delay seconds","long_desc":"","tags":[],"see_also":[]}}},{"name":"zabbix","can_run":true,"error_string":"","module_options":{"discovery_interval":{"name":"discovery_interval","type":"uint","level":"advanced","flags":0,"default_value":"100","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"identifier":{"name":"identifier","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_host":{"name":"zabbix_host","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_port":{"name":"zabbix_port","type":"int","level":"advanced","flags":0,"default_value":"10051","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_sender":{"name":"zabbix_sender","type":"str","level":"advanced","flags":0,"default
_value":"/usr/bin/zabbix_sender","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}}],"services":{"dashboard":"https://192.168.123.100:8443/","prometheus":"http://192.168.123.100:9283/"},"always_on_modules":{"octopus":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"pacific":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"quincy":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"last_failure_osd_epoch":43,"active_clients":[{"addrvec":[{"type":"v2","addr":"192.168.123.100:0","nonce":3644663112}]},{"addrvec":[{"type":"v2","addr":"192.168.123.100:0","nonce":438761138}]},{"addrvec":[{"type":"v2","addr":"192.168.123.100:0","nonce":1924038199}]},{"addrvec":[{"type":"v2","addr":"192.168.123.100:0","nonce":2380814906}]}]}} 2026-03-10T13:06:47.336 INFO:tasks.cephadm.ceph_manager.ceph:mgr available! 2026-03-10T13:06:47.336 INFO:tasks.cephadm.ceph_manager.ceph:waiting for all up 2026-03-10T13:06:47.336 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -- ceph osd dump --format=json 2026-03-10T13:06:47.491 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/mon.a/config 2026-03-10T13:06:47.522 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:06:47 vm00 bash[65902]: Getting image source signatures 2026-03-10T13:06:47.522 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:06:47 vm00 bash[65902]: Copying blob sha256:b5db1e299295edf3005515ab7879c1df64a33c185d3a7a23aa4dcaa17d26f7b3 2026-03-10T13:06:47.522 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:06:47 vm00 bash[65902]: Copying blob sha256:aa2a8d90b84cb2a9c422e7005cd166a008ccf22ef5d7d4f07128478585ce35ea 2026-03-10T13:06:47.522 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:06:47 vm00 bash[65902]: Copying blob sha256:b45d31ee2d7f9f452678a85b0c837c29e12089f31ee8dbac6c8c24dfa4054a30 2026-03-10T13:06:47.863 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:47 vm00 ceph-mon[47364]: from='client.? 192.168.123.108:0/2599885353' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch 2026-03-10T13:06:47.863 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:47 vm00 ceph-mon[47364]: from='client.? ' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch 2026-03-10T13:06:47.863 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:47 vm00 ceph-mon[47364]: from='client.? ' entity='client.admin' cmd='[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]': finished 2026-03-10T13:06:47.863 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:47 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/2869493075' entity='client.admin' cmd=[{"prefix": "mgr dump", "format": "json"}]: dispatch 2026-03-10T13:06:47.863 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:47 vm00 ceph-mon[51670]: from='client.? 
192.168.123.108:0/2599885353' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch 2026-03-10T13:06:47.863 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:47 vm00 ceph-mon[51670]: from='client.? ' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch 2026-03-10T13:06:47.863 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:47 vm00 ceph-mon[51670]: from='client.? ' entity='client.admin' cmd='[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]': finished 2026-03-10T13:06:47.863 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:47 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/2869493075' entity='client.admin' cmd=[{"prefix": "mgr dump", "format": "json"}]: dispatch 2026-03-10T13:06:47.863 INFO:teuthology.orchestra.run.vm00.stdout: 2026-03-10T13:06:47.863 INFO:teuthology.orchestra.run.vm00.stdout:{"epoch":43,"fsid":"98a3dada-1c81-11f1-89c9-d57c120f78d5","created":"2026-03-10T13:04:26.320123+0000","modified":"2026-03-10T13:06:42.766378+0000","last_up_change":"2026-03-10T13:06:35.555795+0000","last_in_change":"2026-03-10T13:06:26.067004+0000","flags":"sortbitwise,recovery_deletes,purged_snapdirs,pglog_hardlimit","flags_num":5799936,"flags_set":["pglog_hardlimit","purged_snapdirs","recovery_deletes","sortbitwise"],"crush_version":18,"full_ratio":0.94999998807907104,"backfillfull_ratio":0.89999997615814209,"nearfull_ratio":0.85000002384185791,"cluster_snapshot":"","pool_max":1,"max_osd":8,"require_min_compat_client":"luminous","min_compat_client":"jewel","require_osd_release":"quincy","pools":[{"pool":1,"pool_name":".mgr","create_time":"2026-03-10T13:05:53.703811+0000","flags":1,"flags_names":"hashpspool","type":1,"size":3,"min_size":2,"crush_rule":0,"peering_crush_bucket_count":0,"peering_crush_bucket_target":0,"peering_crush_bucket_barrier":0,"peering_crush_bucket_mandatory_member":2147483647,"object_hash":2,"pg_autoscale_mode":"off","pg_num":1,"pg_placement_num":1,"pg_placement_num_target":1,"pg_num_target":1,"pg_num_pending":1,"last_pg_merge_meta":{"source_pgid":"0.0","ready_epoch":0,"last_epoch_started":0,"last_epoch_clean":0,"source_version":"0'0","target_version":"0'0"},"last_change":"18","last_force_op_resend":"0","last_force_op_resend_prenautilus":"0","last_force_op_resend_preluminous":"0","auid":0,"snap_mode":"selfmanaged","snap_seq":0,"snap_epoch":0,"pool_snaps":[],"removed_snaps":"[]","quota_max_bytes":0,"quota_max_objects":0,"tiers":[],"tier_of":-1,"read_tier":-1,"write_tier":-1,"cache_mode":"none","target_max_bytes":0,"target_max_objects":0,"cache_target_dirty_ratio_micro":400000,"cache_target_dirty_high_ratio_micro":600000,"cache_target_full_ratio_micro":800000,"cache_min_flush_age":0,"cache_min_evict_age":0,"erasure_code_profile":"","hit_set_params":{"type":"none"},"hit_set_period":0,"hit_set_count":0,"use_gmt_hitset":true,"min_read_recency_for_promote":0,"min_write_recency_for_promote":0,"hit_set_grade_decay_rate":0,"hit_set_search_last_n":0,"grade_table":[],"stripe_width":0,"expected_num_objects":0,"fast_read":false,"options":{"pg_num_max":32,"pg_num_min":1},"application_metadata":{"mgr":{}}}],"osds":[{"osd":0,"uuid":"33741dbc-5269-4c43-97b4-ac057d7a2041","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,
"up_from":8,"up_thru":40,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6802","nonce":771461294},{"type":"v1","addr":"192.168.123.100:6803","nonce":771461294}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6804","nonce":771461294},{"type":"v1","addr":"192.168.123.100:6805","nonce":771461294}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6808","nonce":771461294},{"type":"v1","addr":"192.168.123.100:6809","nonce":771461294}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6806","nonce":771461294},{"type":"v1","addr":"192.168.123.100:6807","nonce":771461294}]},"public_addr":"192.168.123.100:6803/771461294","cluster_addr":"192.168.123.100:6805/771461294","heartbeat_back_addr":"192.168.123.100:6809/771461294","heartbeat_front_addr":"192.168.123.100:6807/771461294","state":["exists","up"]},{"osd":1,"uuid":"31dc7b09-f48f-4ec2-8ad6-69f3b68a5138","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":11,"up_thru":26,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6810","nonce":3864481161},{"type":"v1","addr":"192.168.123.100:6811","nonce":3864481161}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6812","nonce":3864481161},{"type":"v1","addr":"192.168.123.100:6813","nonce":3864481161}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6816","nonce":3864481161},{"type":"v1","addr":"192.168.123.100:6817","nonce":3864481161}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6814","nonce":3864481161},{"type":"v1","addr":"192.168.123.100:6815","nonce":3864481161}]},"public_addr":"192.168.123.100:6811/3864481161","cluster_addr":"192.168.123.100:6813/3864481161","heartbeat_back_addr":"192.168.123.100:6817/3864481161","heartbeat_front_addr":"192.168.123.100:6815/3864481161","state":["exists","up"]},{"osd":2,"uuid":"f9f7ad09-367f-410b-9921-f31c456c313d","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":15,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6818","nonce":4074751068},{"type":"v1","addr":"192.168.123.100:6819","nonce":4074751068}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6820","nonce":4074751068},{"type":"v1","addr":"192.168.123.100:6821","nonce":4074751068}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6824","nonce":4074751068},{"type":"v1","addr":"192.168.123.100:6825","nonce":4074751068}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6822","nonce":4074751068},{"type":"v1","addr":"192.168.123.100:6823","nonce":4074751068}]},"public_addr":"192.168.123.100:6819/4074751068","cluster_addr":"192.168.123.100:6821/4074751068","heartbeat_back_addr":"192.168.123.100:6825/4074751068","heartbeat_front_addr":"192.168.123.100:6823/4074751068","state":["exists","up"]},{"osd":3,"uuid":"36dd1fdb-2d5f-4be6-b549-9bcc7e503439","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":21,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6826","nonce":441174892},{"type":"v1","addr":"192.168.123.100:6827","nonce":441174892}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6828","nonce":441174892},{"type":"v1","addr":"192.168.123.100:6829","nonce":441174892}]},"heartbeat_back_addrs
":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6832","nonce":441174892},{"type":"v1","addr":"192.168.123.100:6833","nonce":441174892}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6830","nonce":441174892},{"type":"v1","addr":"192.168.123.100:6831","nonce":441174892}]},"public_addr":"192.168.123.100:6827/441174892","cluster_addr":"192.168.123.100:6829/441174892","heartbeat_back_addr":"192.168.123.100:6833/441174892","heartbeat_front_addr":"192.168.123.100:6831/441174892","state":["exists","up"]},{"osd":4,"uuid":"2ec681f9-baf2-471e-8b59-1a1b47be1367","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":25,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6800","nonce":1094633829},{"type":"v1","addr":"192.168.123.108:6801","nonce":1094633829}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6802","nonce":1094633829},{"type":"v1","addr":"192.168.123.108:6803","nonce":1094633829}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6806","nonce":1094633829},{"type":"v1","addr":"192.168.123.108:6807","nonce":1094633829}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6804","nonce":1094633829},{"type":"v1","addr":"192.168.123.108:6805","nonce":1094633829}]},"public_addr":"192.168.123.108:6801/1094633829","cluster_addr":"192.168.123.108:6803/1094633829","heartbeat_back_addr":"192.168.123.108:6807/1094633829","heartbeat_front_addr":"192.168.123.108:6805/1094633829","state":["exists","up"]},{"osd":5,"uuid":"4b07141b-58eb-441e-a2a5-b6422715a810","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":30,"up_thru":31,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6808","nonce":658387712},{"type":"v1","addr":"192.168.123.108:6809","nonce":658387712}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6810","nonce":658387712},{"type":"v1","addr":"192.168.123.108:6811","nonce":658387712}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6814","nonce":658387712},{"type":"v1","addr":"192.168.123.108:6815","nonce":658387712}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6812","nonce":658387712},{"type":"v1","addr":"192.168.123.108:6813","nonce":658387712}]},"public_addr":"192.168.123.108:6809/658387712","cluster_addr":"192.168.123.108:6811/658387712","heartbeat_back_addr":"192.168.123.108:6815/658387712","heartbeat_front_addr":"192.168.123.108:6813/658387712","state":["exists","up"]},{"osd":6,"uuid":"8d39583c-2063-4d14-9842-a1a1a8782f74","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":35,"up_thru":36,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6816","nonce":2473564301},{"type":"v1","addr":"192.168.123.108:6817","nonce":2473564301}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6818","nonce":2473564301},{"type":"v1","addr":"192.168.123.108:6819","nonce":2473564301}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6822","nonce":2473564301},{"type":"v1","addr":"192.168.123.108:6823","nonce":2473564301}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6820","nonce":2473564301},{"type":"v1","addr":"192.168.123.108:6821","nonce":2473564301}]},"public_addr":"192.168.123.108:6817/2473564301","cluster_addr":"192.168.123.10
8:6819/2473564301","heartbeat_back_addr":"192.168.123.108:6823/2473564301","heartbeat_front_addr":"192.168.123.108:6821/2473564301","state":["exists","up"]},{"osd":7,"uuid":"e33bb491-3f0c-40cc-a5ae-770ea8457536","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":40,"up_thru":41,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6824","nonce":19133272},{"type":"v1","addr":"192.168.123.108:6825","nonce":19133272}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6826","nonce":19133272},{"type":"v1","addr":"192.168.123.108:6827","nonce":19133272}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6830","nonce":19133272},{"type":"v1","addr":"192.168.123.108:6831","nonce":19133272}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6828","nonce":19133272},{"type":"v1","addr":"192.168.123.108:6829","nonce":19133272}]},"public_addr":"192.168.123.108:6825/19133272","cluster_addr":"192.168.123.108:6827/19133272","heartbeat_back_addr":"192.168.123.108:6831/19133272","heartbeat_front_addr":"192.168.123.108:6829/19133272","state":["exists","up"]}],"osd_xinfo":[{"osd":0,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T13:05:29.519117+0000","dead_epoch":0},{"osd":1,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T13:05:40.222041+0000","dead_epoch":0},{"osd":2,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T13:05:50.560236+0000","dead_epoch":0},{"osd":3,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T13:06:00.424824+0000","dead_epoch":0},{"osd":4,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T13:06:09.428649+0000","dead_epoch":0},{"osd":5,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T13:06:16.992877+0000","dead_epoch":0},{"osd":6,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T13:06:25.955750+0000","dead_epoch":0},{"osd":7,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T13:06:33.874148+0000","dead_epoch":0}],"pg_upmap":[],"pg_upmap_items":[],"pg_temp":[],"primary_temp":[],"blocklist":{"192.168.123.100:0/1813360869":"2026-03-11T13:06:42.766354+0000","192.168.123.100:0/1569387625":"2026-03-11T13:06:42.766354+0000","192.168.123.100:0/3821537877":"2026-03-11T13:06:42.766354+0000","192.168.123.100:6801/2653325477":"2026-03-11T13:04:52.784340+0000","192.168.123.100:6800/2653325477":"2026-03-11T13:04:52.784340+0000","192.168.123.100:0/4037273490":"2026-03-11T13:04:52.784340+0000","192.168.123.100:6800/1486888834":"2026-03-11T13:06:42.766354+0000","192.168.123.100:0/4178613541":"2026-03-11T13:04:52.784340+0000","192.168.123.100:0/2030610713":"2026-03-11T13:04:52.784340+0000","192.168.123.100:0/1209684253":"2026-03-11T13:04:41.791091+0000","192.168.123.100:0/1869727854":"2026-0
3-11T13:06:42.766354+0000","192.168.123.100:6801/6955224":"2026-03-11T13:04:41.791091+0000","192.168.123.100:6801/1486888834":"2026-03-11T13:06:42.766354+0000","192.168.123.100:0/2363309494":"2026-03-11T13:04:41.791091+0000","192.168.123.100:6800/6955224":"2026-03-11T13:04:41.791091+0000","192.168.123.100:0/4024415478":"2026-03-11T13:04:41.791091+0000"},"erasure_code_profiles":{"default":{"crush-failure-domain":"osd","k":"2","m":"1","plugin":"jerasure","technique":"reed_sol_van"}},"removed_snaps_queue":[],"new_removed_snaps":[],"new_purged_snaps":[],"crush_node_flags":{},"device_class_flags":{},"stretch_mode":{"stretch_mode_enabled":false,"stretch_bucket_count":0,"degraded_stretch_mode":0,"recovering_stretch_mode":0,"stretch_mode_bucket":0}} 2026-03-10T13:06:47.956 INFO:tasks.cephadm.ceph_manager.ceph:all up! 2026-03-10T13:06:47.956 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -- ceph osd dump --format=json 2026-03-10T13:06:48.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:47 vm08 ceph-mon[49535]: from='client.? 192.168.123.108:0/2599885353' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch 2026-03-10T13:06:48.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:47 vm08 ceph-mon[49535]: from='client.? ' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch 2026-03-10T13:06:48.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:47 vm08 ceph-mon[49535]: from='client.? ' entity='client.admin' cmd='[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]': finished 2026-03-10T13:06:48.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:47 vm08 ceph-mon[49535]: from='client.? 
192.168.123.100:0/2869493075' entity='client.admin' cmd=[{"prefix": "mgr dump", "format": "json"}]: dispatch 2026-03-10T13:06:48.125 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/mon.a/config 2026-03-10T13:06:48.416 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:06:48 vm00 bash[65902]: Copying config sha256:1dbe0e931976487e20e5cfb272087e08a9779c88fd5e9617ed7042dd9751ec26 2026-03-10T13:06:48.416 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:06:48 vm00 bash[65902]: Writing manifest to image destination 2026-03-10T13:06:48.416 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:06:48 vm00 podman[65902]: 2026-03-10 13:06:48.394288989 +0000 UTC m=+2.264026314 container create 439a263972f093b39f7400664b8d35f7a329fe52988248d38b33d3afa6cf6629 (image=quay.io/prometheus/node-exporter:v1.3.1, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a, maintainer=The Prometheus Authors ) 2026-03-10T13:06:48.492 INFO:teuthology.orchestra.run.vm00.stdout: 2026-03-10T13:06:48.492 INFO:teuthology.orchestra.run.vm00.stdout:{"epoch":43,"fsid":"98a3dada-1c81-11f1-89c9-d57c120f78d5","created":"2026-03-10T13:04:26.320123+0000","modified":"2026-03-10T13:06:42.766378+0000","last_up_change":"2026-03-10T13:06:35.555795+0000","last_in_change":"2026-03-10T13:06:26.067004+0000","flags":"sortbitwise,recovery_deletes,purged_snapdirs,pglog_hardlimit","flags_num":5799936,"flags_set":["pglog_hardlimit","purged_snapdirs","recovery_deletes","sortbitwise"],"crush_version":18,"full_ratio":0.94999998807907104,"backfillfull_ratio":0.89999997615814209,"nearfull_ratio":0.85000002384185791,"cluster_snapshot":"","pool_max":1,"max_osd":8,"require_min_compat_client":"luminous","min_compat_client":"jewel","require_osd_release":"quincy","pools":[{"pool":1,"pool_name":".mgr","create_time":"2026-03-10T13:05:53.703811+0000","flags":1,"flags_names":"hashpspool","type":1,"size":3,"min_size":2,"crush_rule":0,"peering_crush_bucket_count":0,"peering_crush_bucket_target":0,"peering_crush_bucket_barrier":0,"peering_crush_bucket_mandatory_member":2147483647,"object_hash":2,"pg_autoscale_mode":"off","pg_num":1,"pg_placement_num":1,"pg_placement_num_target":1,"pg_num_target":1,"pg_num_pending":1,"last_pg_merge_meta":{"source_pgid":"0.0","ready_epoch":0,"last_epoch_started":0,"last_epoch_clean":0,"source_version":"0'0","target_version":"0'0"},"last_change":"18","last_force_op_resend":"0","last_force_op_resend_prenautilus":"0","last_force_op_resend_preluminous":"0","auid":0,"snap_mode":"selfmanaged","snap_seq":0,"snap_epoch":0,"pool_snaps":[],"removed_snaps":"[]","quota_max_bytes":0,"quota_max_objects":0,"tiers":[],"tier_of":-1,"read_tier":-1,"write_tier":-1,"cache_mode":"none","target_max_bytes":0,"target_max_objects":0,"cache_target_dirty_ratio_micro":400000,"cache_target_dirty_high_ratio_micro":600000,"cache_target_full_ratio_micro":800000,"cache_min_flush_age":0,"cache_min_evict_age":0,"erasure_code_profile":"","hit_set_params":{"type":"none"},"hit_set_period":0,"hit_set_count":0,"use_gmt_hitset":true,"min_read_recency_for_promote":0,"min_write_recency_for_promote":0,"hit_set_grade_decay_rate":0,"hit_set_search_last_n":0,"grade_table":[],"stripe_width":0,"expected_num_objects":0,"fast_read":false,"options":{"pg_num_max":32,"pg_num_min":1},"application_metadata":{"mgr":{}}}],"osds":[{"osd":0,"uuid":"33741dbc-5269-4c43-97b4-ac057d7a2041","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":8,"up_thru":40,"dow
n_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6802","nonce":771461294},{"type":"v1","addr":"192.168.123.100:6803","nonce":771461294}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6804","nonce":771461294},{"type":"v1","addr":"192.168.123.100:6805","nonce":771461294}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6808","nonce":771461294},{"type":"v1","addr":"192.168.123.100:6809","nonce":771461294}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6806","nonce":771461294},{"type":"v1","addr":"192.168.123.100:6807","nonce":771461294}]},"public_addr":"192.168.123.100:6803/771461294","cluster_addr":"192.168.123.100:6805/771461294","heartbeat_back_addr":"192.168.123.100:6809/771461294","heartbeat_front_addr":"192.168.123.100:6807/771461294","state":["exists","up"]},{"osd":1,"uuid":"31dc7b09-f48f-4ec2-8ad6-69f3b68a5138","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":11,"up_thru":26,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6810","nonce":3864481161},{"type":"v1","addr":"192.168.123.100:6811","nonce":3864481161}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6812","nonce":3864481161},{"type":"v1","addr":"192.168.123.100:6813","nonce":3864481161}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6816","nonce":3864481161},{"type":"v1","addr":"192.168.123.100:6817","nonce":3864481161}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6814","nonce":3864481161},{"type":"v1","addr":"192.168.123.100:6815","nonce":3864481161}]},"public_addr":"192.168.123.100:6811/3864481161","cluster_addr":"192.168.123.100:6813/3864481161","heartbeat_back_addr":"192.168.123.100:6817/3864481161","heartbeat_front_addr":"192.168.123.100:6815/3864481161","state":["exists","up"]},{"osd":2,"uuid":"f9f7ad09-367f-410b-9921-f31c456c313d","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":15,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6818","nonce":4074751068},{"type":"v1","addr":"192.168.123.100:6819","nonce":4074751068}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6820","nonce":4074751068},{"type":"v1","addr":"192.168.123.100:6821","nonce":4074751068}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6824","nonce":4074751068},{"type":"v1","addr":"192.168.123.100:6825","nonce":4074751068}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6822","nonce":4074751068},{"type":"v1","addr":"192.168.123.100:6823","nonce":4074751068}]},"public_addr":"192.168.123.100:6819/4074751068","cluster_addr":"192.168.123.100:6821/4074751068","heartbeat_back_addr":"192.168.123.100:6825/4074751068","heartbeat_front_addr":"192.168.123.100:6823/4074751068","state":["exists","up"]},{"osd":3,"uuid":"36dd1fdb-2d5f-4be6-b549-9bcc7e503439","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":21,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6826","nonce":441174892},{"type":"v1","addr":"192.168.123.100:6827","nonce":441174892}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6828","nonce":441174892},{"type":"v1","addr":"192.168.123.100:6829","nonce":441174892}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","a
ddr":"192.168.123.100:6832","nonce":441174892},{"type":"v1","addr":"192.168.123.100:6833","nonce":441174892}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6830","nonce":441174892},{"type":"v1","addr":"192.168.123.100:6831","nonce":441174892}]},"public_addr":"192.168.123.100:6827/441174892","cluster_addr":"192.168.123.100:6829/441174892","heartbeat_back_addr":"192.168.123.100:6833/441174892","heartbeat_front_addr":"192.168.123.100:6831/441174892","state":["exists","up"]},{"osd":4,"uuid":"2ec681f9-baf2-471e-8b59-1a1b47be1367","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":25,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6800","nonce":1094633829},{"type":"v1","addr":"192.168.123.108:6801","nonce":1094633829}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6802","nonce":1094633829},{"type":"v1","addr":"192.168.123.108:6803","nonce":1094633829}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6806","nonce":1094633829},{"type":"v1","addr":"192.168.123.108:6807","nonce":1094633829}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6804","nonce":1094633829},{"type":"v1","addr":"192.168.123.108:6805","nonce":1094633829}]},"public_addr":"192.168.123.108:6801/1094633829","cluster_addr":"192.168.123.108:6803/1094633829","heartbeat_back_addr":"192.168.123.108:6807/1094633829","heartbeat_front_addr":"192.168.123.108:6805/1094633829","state":["exists","up"]},{"osd":5,"uuid":"4b07141b-58eb-441e-a2a5-b6422715a810","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":30,"up_thru":31,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6808","nonce":658387712},{"type":"v1","addr":"192.168.123.108:6809","nonce":658387712}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6810","nonce":658387712},{"type":"v1","addr":"192.168.123.108:6811","nonce":658387712}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6814","nonce":658387712},{"type":"v1","addr":"192.168.123.108:6815","nonce":658387712}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6812","nonce":658387712},{"type":"v1","addr":"192.168.123.108:6813","nonce":658387712}]},"public_addr":"192.168.123.108:6809/658387712","cluster_addr":"192.168.123.108:6811/658387712","heartbeat_back_addr":"192.168.123.108:6815/658387712","heartbeat_front_addr":"192.168.123.108:6813/658387712","state":["exists","up"]},{"osd":6,"uuid":"8d39583c-2063-4d14-9842-a1a1a8782f74","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":35,"up_thru":36,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6816","nonce":2473564301},{"type":"v1","addr":"192.168.123.108:6817","nonce":2473564301}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6818","nonce":2473564301},{"type":"v1","addr":"192.168.123.108:6819","nonce":2473564301}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6822","nonce":2473564301},{"type":"v1","addr":"192.168.123.108:6823","nonce":2473564301}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6820","nonce":2473564301},{"type":"v1","addr":"192.168.123.108:6821","nonce":2473564301}]},"public_addr":"192.168.123.108:6817/2473564301","cluster_addr":"192.168.123.108:6819/2473564301","heartbeat
_back_addr":"192.168.123.108:6823/2473564301","heartbeat_front_addr":"192.168.123.108:6821/2473564301","state":["exists","up"]},{"osd":7,"uuid":"e33bb491-3f0c-40cc-a5ae-770ea8457536","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":40,"up_thru":41,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6824","nonce":19133272},{"type":"v1","addr":"192.168.123.108:6825","nonce":19133272}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6826","nonce":19133272},{"type":"v1","addr":"192.168.123.108:6827","nonce":19133272}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6830","nonce":19133272},{"type":"v1","addr":"192.168.123.108:6831","nonce":19133272}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6828","nonce":19133272},{"type":"v1","addr":"192.168.123.108:6829","nonce":19133272}]},"public_addr":"192.168.123.108:6825/19133272","cluster_addr":"192.168.123.108:6827/19133272","heartbeat_back_addr":"192.168.123.108:6831/19133272","heartbeat_front_addr":"192.168.123.108:6829/19133272","state":["exists","up"]}],"osd_xinfo":[{"osd":0,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T13:05:29.519117+0000","dead_epoch":0},{"osd":1,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T13:05:40.222041+0000","dead_epoch":0},{"osd":2,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T13:05:50.560236+0000","dead_epoch":0},{"osd":3,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T13:06:00.424824+0000","dead_epoch":0},{"osd":4,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T13:06:09.428649+0000","dead_epoch":0},{"osd":5,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T13:06:16.992877+0000","dead_epoch":0},{"osd":6,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T13:06:25.955750+0000","dead_epoch":0},{"osd":7,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T13:06:33.874148+0000","dead_epoch":0}],"pg_upmap":[],"pg_upmap_items":[],"pg_temp":[],"primary_temp":[],"blocklist":{"192.168.123.100:0/1813360869":"2026-03-11T13:06:42.766354+0000","192.168.123.100:0/1569387625":"2026-03-11T13:06:42.766354+0000","192.168.123.100:0/3821537877":"2026-03-11T13:06:42.766354+0000","192.168.123.100:6801/2653325477":"2026-03-11T13:04:52.784340+0000","192.168.123.100:6800/2653325477":"2026-03-11T13:04:52.784340+0000","192.168.123.100:0/4037273490":"2026-03-11T13:04:52.784340+0000","192.168.123.100:6800/1486888834":"2026-03-11T13:06:42.766354+0000","192.168.123.100:0/4178613541":"2026-03-11T13:04:52.784340+0000","192.168.123.100:0/2030610713":"2026-03-11T13:04:52.784340+0000","192.168.123.100:0/1209684253":"2026-03-11T13:04:41.791091+0000","192.168.123.100:0/1869727854":"2026-03-11T13:06:42.766354+0000","1
92.168.123.100:6801/6955224":"2026-03-11T13:04:41.791091+0000","192.168.123.100:6801/1486888834":"2026-03-11T13:06:42.766354+0000","192.168.123.100:0/2363309494":"2026-03-11T13:04:41.791091+0000","192.168.123.100:6800/6955224":"2026-03-11T13:04:41.791091+0000","192.168.123.100:0/4024415478":"2026-03-11T13:04:41.791091+0000"},"erasure_code_profiles":{"default":{"crush-failure-domain":"osd","k":"2","m":"1","plugin":"jerasure","technique":"reed_sol_van"}},"removed_snaps_queue":[],"new_removed_snaps":[],"new_purged_snaps":[],"crush_node_flags":{},"device_class_flags":{},"stretch_mode":{"stretch_mode_enabled":false,"stretch_bucket_count":0,"degraded_stretch_mode":0,"recovering_stretch_mode":0,"stretch_mode_bucket":0}} 2026-03-10T13:06:48.560 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -- ceph tell osd.0 flush_pg_stats 2026-03-10T13:06:48.560 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -- ceph tell osd.1 flush_pg_stats 2026-03-10T13:06:48.560 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -- ceph tell osd.2 flush_pg_stats 2026-03-10T13:06:48.560 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -- ceph tell osd.3 flush_pg_stats 2026-03-10T13:06:48.560 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -- ceph tell osd.4 flush_pg_stats 2026-03-10T13:06:48.561 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -- ceph tell osd.5 flush_pg_stats 2026-03-10T13:06:48.561 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -- ceph tell osd.6 flush_pg_stats 2026-03-10T13:06:48.561 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -- ceph tell osd.7 flush_pg_stats 2026-03-10T13:06:48.678 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:48 vm00 ceph-mon[47364]: pgmap v5: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:06:48.678 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:48 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/3735027545' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-10T13:06:48.678 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:48 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:06:48.678 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:48 vm00 ceph-mon[47364]: Deploying daemon node-exporter.b on vm08 2026-03-10T13:06:48.678 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:48 vm00 ceph-mon[47364]: from='client.? 
192.168.123.100:0/2789793686' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-10T13:06:48.679 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:48 vm00 ceph-mon[51670]: pgmap v5: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:06:48.679 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:48 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/3735027545' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-10T13:06:48.679 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:48 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:06:48.679 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:48 vm00 ceph-mon[51670]: Deploying daemon node-exporter.b on vm08 2026-03-10T13:06:48.679 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:48 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/2789793686' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-10T13:06:48.679 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:06:48 vm00 podman[65902]: 2026-03-10 13:06:48.420527749 +0000 UTC m=+2.290265074 container init 439a263972f093b39f7400664b8d35f7a329fe52988248d38b33d3afa6cf6629 (image=quay.io/prometheus/node-exporter:v1.3.1, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a, maintainer=The Prometheus Authors ) 2026-03-10T13:06:48.679 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:06:48 vm00 podman[65902]: 2026-03-10 13:06:48.42287199 +0000 UTC m=+2.292609315 container start 439a263972f093b39f7400664b8d35f7a329fe52988248d38b33d3afa6cf6629 (image=quay.io/prometheus/node-exporter:v1.3.1, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a, maintainer=The Prometheus Authors ) 2026-03-10T13:06:48.679 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:06:48 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[66334]: ts=2026-03-10T13:06:48.426Z caller=node_exporter.go:182 level=info msg="Starting node_exporter" version="(version=1.3.1, branch=HEAD, revision=a2321e7b940ddcff26873612bccdf7cd4c42b6b6)" 2026-03-10T13:06:48.679 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:06:48 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[66334]: ts=2026-03-10T13:06:48.426Z caller=node_exporter.go:183 level=info msg="Build context" build_context="(go=go1.17.3, user=root@243aafa5525c, date=20211205-11:09:49)" 2026-03-10T13:06:48.679 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:06:48 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[66334]: ts=2026-03-10T13:06:48.427Z caller=filesystem_common.go:111 level=info collector=filesystem msg="Parsed flag --collector.filesystem.mount-points-exclude" flag=^/(dev|proc|run/credentials/.+|sys|var/lib/docker/.+)($|/) 2026-03-10T13:06:48.679 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:06:48 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[66334]: ts=2026-03-10T13:06:48.427Z caller=filesystem_common.go:113 level=info collector=filesystem msg="Parsed flag --collector.filesystem.fs-types-exclude" flag=^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$ 2026-03-10T13:06:48.679 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:06:48 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[66334]: ts=2026-03-10T13:06:48.427Z 
caller=node_exporter.go:108 level=info msg="Enabled collectors" 2026-03-10T13:06:48.679 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:06:48 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[66334]: ts=2026-03-10T13:06:48.427Z caller=node_exporter.go:115 level=info collector=arp 2026-03-10T13:06:48.679 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:06:48 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[66334]: ts=2026-03-10T13:06:48.427Z caller=node_exporter.go:115 level=info collector=bcache 2026-03-10T13:06:48.679 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:06:48 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[66334]: ts=2026-03-10T13:06:48.427Z caller=node_exporter.go:115 level=info collector=bonding 2026-03-10T13:06:48.679 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:06:48 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[66334]: ts=2026-03-10T13:06:48.427Z caller=node_exporter.go:115 level=info collector=btrfs 2026-03-10T13:06:48.679 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:06:48 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[66334]: ts=2026-03-10T13:06:48.427Z caller=node_exporter.go:115 level=info collector=conntrack 2026-03-10T13:06:48.679 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:06:48 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[66334]: ts=2026-03-10T13:06:48.427Z caller=node_exporter.go:115 level=info collector=cpu 2026-03-10T13:06:48.679 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:06:48 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[66334]: ts=2026-03-10T13:06:48.427Z caller=node_exporter.go:115 level=info collector=cpufreq 2026-03-10T13:06:48.679 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:06:48 vm00 bash[65902]: 439a263972f093b39f7400664b8d35f7a329fe52988248d38b33d3afa6cf6629 2026-03-10T13:06:48.679 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:06:48 vm00 podman[65902]: 2026-03-10 13:06:48.382128147 +0000 UTC m=+2.251865472 image pull 1dbe0e931976487e20e5cfb272087e08a9779c88fd5e9617ed7042dd9751ec26 quay.io/prometheus/node-exporter:v1.3.1 2026-03-10T13:06:48.679 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:06:48 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[66334]: ts=2026-03-10T13:06:48.428Z caller=node_exporter.go:115 level=info collector=diskstats 2026-03-10T13:06:48.679 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:06:48 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[66334]: ts=2026-03-10T13:06:48.428Z caller=node_exporter.go:115 level=info collector=dmi 2026-03-10T13:06:48.679 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:06:48 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[66334]: ts=2026-03-10T13:06:48.428Z caller=node_exporter.go:115 level=info collector=edac 2026-03-10T13:06:48.679 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:06:48 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[66334]: ts=2026-03-10T13:06:48.428Z caller=node_exporter.go:115 level=info collector=entropy 2026-03-10T13:06:48.679 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:06:48 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[66334]: ts=2026-03-10T13:06:48.428Z caller=node_exporter.go:115 level=info collector=fibrechannel 2026-03-10T13:06:48.679 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:06:48 vm00 
ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[66334]: ts=2026-03-10T13:06:48.428Z caller=node_exporter.go:115 level=info collector=filefd 2026-03-10T13:06:48.679 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:06:48 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[66334]: ts=2026-03-10T13:06:48.428Z caller=node_exporter.go:115 level=info collector=filesystem 2026-03-10T13:06:48.679 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:06:48 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[66334]: ts=2026-03-10T13:06:48.428Z caller=node_exporter.go:115 level=info collector=hwmon 2026-03-10T13:06:48.679 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:06:48 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[66334]: ts=2026-03-10T13:06:48.428Z caller=node_exporter.go:115 level=info collector=infiniband 2026-03-10T13:06:48.679 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:06:48 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[66334]: ts=2026-03-10T13:06:48.428Z caller=node_exporter.go:115 level=info collector=ipvs 2026-03-10T13:06:48.679 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:06:48 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[66334]: ts=2026-03-10T13:06:48.428Z caller=node_exporter.go:115 level=info collector=loadavg 2026-03-10T13:06:48.679 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:06:48 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[66334]: ts=2026-03-10T13:06:48.428Z caller=node_exporter.go:115 level=info collector=mdadm 2026-03-10T13:06:48.679 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:06:48 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[66334]: ts=2026-03-10T13:06:48.428Z caller=node_exporter.go:115 level=info collector=meminfo 2026-03-10T13:06:48.679 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:06:48 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[66334]: ts=2026-03-10T13:06:48.428Z caller=node_exporter.go:115 level=info collector=netclass 2026-03-10T13:06:48.679 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:06:48 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[66334]: ts=2026-03-10T13:06:48.428Z caller=node_exporter.go:115 level=info collector=netdev 2026-03-10T13:06:48.679 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:06:48 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[66334]: ts=2026-03-10T13:06:48.428Z caller=node_exporter.go:115 level=info collector=netstat 2026-03-10T13:06:48.679 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:06:48 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[66334]: ts=2026-03-10T13:06:48.429Z caller=node_exporter.go:115 level=info collector=nfs 2026-03-10T13:06:48.679 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:06:48 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[66334]: ts=2026-03-10T13:06:48.429Z caller=node_exporter.go:115 level=info collector=nfsd 2026-03-10T13:06:48.679 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:06:48 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[66334]: ts=2026-03-10T13:06:48.429Z caller=node_exporter.go:115 level=info collector=nvme 2026-03-10T13:06:48.679 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:06:48 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[66334]: ts=2026-03-10T13:06:48.429Z caller=node_exporter.go:115 level=info 
collector=os 2026-03-10T13:06:48.679 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:06:48 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[66334]: ts=2026-03-10T13:06:48.429Z caller=node_exporter.go:115 level=info collector=powersupplyclass 2026-03-10T13:06:48.679 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:06:48 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[66334]: ts=2026-03-10T13:06:48.429Z caller=node_exporter.go:115 level=info collector=pressure 2026-03-10T13:06:48.679 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:06:48 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[66334]: ts=2026-03-10T13:06:48.429Z caller=node_exporter.go:115 level=info collector=rapl 2026-03-10T13:06:48.679 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:06:48 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[66334]: ts=2026-03-10T13:06:48.429Z caller=node_exporter.go:115 level=info collector=schedstat 2026-03-10T13:06:48.679 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:06:48 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[66334]: ts=2026-03-10T13:06:48.429Z caller=node_exporter.go:115 level=info collector=sockstat 2026-03-10T13:06:48.679 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:06:48 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[66334]: ts=2026-03-10T13:06:48.429Z caller=node_exporter.go:115 level=info collector=softnet 2026-03-10T13:06:48.679 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:06:48 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[66334]: ts=2026-03-10T13:06:48.429Z caller=node_exporter.go:115 level=info collector=stat 2026-03-10T13:06:48.680 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:06:48 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[66334]: ts=2026-03-10T13:06:48.429Z caller=node_exporter.go:115 level=info collector=tapestats 2026-03-10T13:06:48.680 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:06:48 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[66334]: ts=2026-03-10T13:06:48.429Z caller=node_exporter.go:115 level=info collector=textfile 2026-03-10T13:06:48.680 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:06:48 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[66334]: ts=2026-03-10T13:06:48.429Z caller=node_exporter.go:115 level=info collector=thermal_zone 2026-03-10T13:06:48.680 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:06:48 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[66334]: ts=2026-03-10T13:06:48.429Z caller=node_exporter.go:115 level=info collector=time 2026-03-10T13:06:48.680 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:06:48 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[66334]: ts=2026-03-10T13:06:48.429Z caller=node_exporter.go:115 level=info collector=udp_queues 2026-03-10T13:06:48.680 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:06:48 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[66334]: ts=2026-03-10T13:06:48.429Z caller=node_exporter.go:115 level=info collector=uname 2026-03-10T13:06:48.680 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:06:48 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[66334]: ts=2026-03-10T13:06:48.429Z caller=node_exporter.go:115 level=info collector=vmstat 2026-03-10T13:06:48.680 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:06:48 vm00 
ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[66334]: ts=2026-03-10T13:06:48.429Z caller=node_exporter.go:115 level=info collector=xfs 2026-03-10T13:06:48.680 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:06:48 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[66334]: ts=2026-03-10T13:06:48.429Z caller=node_exporter.go:115 level=info collector=zfs 2026-03-10T13:06:48.680 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:06:48 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[66334]: ts=2026-03-10T13:06:48.429Z caller=node_exporter.go:199 level=info msg="Listening on" address=:9100 2026-03-10T13:06:48.680 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:06:48 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[66334]: ts=2026-03-10T13:06:48.430Z caller=tls_config.go:195 level=info msg="TLS is disabled." http2=false 2026-03-10T13:06:48.680 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:06:48 vm00 systemd[1]: Started Ceph node-exporter.a for 98a3dada-1c81-11f1-89c9-d57c120f78d5. 2026-03-10T13:06:48.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:48 vm08 ceph-mon[49535]: pgmap v5: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:06:48.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:48 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/3735027545' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-10T13:06:48.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:48 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:06:48.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:48 vm08 ceph-mon[49535]: Deploying daemon node-exporter.b on vm08 2026-03-10T13:06:48.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:48 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/2789793686' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-10T13:06:49.094 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:06:48 vm08 systemd[1]: Starting Ceph node-exporter.b for 98a3dada-1c81-11f1-89c9-d57c120f78d5... 2026-03-10T13:06:49.142 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/mon.a/config 2026-03-10T13:06:49.205 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/mon.a/config 2026-03-10T13:06:49.208 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/mon.a/config 2026-03-10T13:06:49.249 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/mon.a/config 2026-03-10T13:06:49.256 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/mon.a/config 2026-03-10T13:06:49.263 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/mon.a/config 2026-03-10T13:06:49.333 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/mon.a/config 2026-03-10T13:06:49.382 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/mon.a/config 2026-03-10T13:06:49.521 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:06:49 vm08 bash[64057]: Trying to pull quay.io/prometheus/node-exporter:v1.3.1... 
2026-03-10T13:06:50.196 INFO:teuthology.orchestra.run.vm00.stdout:34359738385 2026-03-10T13:06:50.196 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -- ceph osd last-stat-seq osd.0 2026-03-10T13:06:50.313 INFO:teuthology.orchestra.run.vm00.stdout:47244640271 2026-03-10T13:06:50.313 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -- ceph osd last-stat-seq osd.1 2026-03-10T13:06:50.561 INFO:teuthology.orchestra.run.vm00.stdout:150323855366 2026-03-10T13:06:50.561 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -- ceph osd last-stat-seq osd.6 2026-03-10T13:06:50.582 INFO:teuthology.orchestra.run.vm00.stdout:171798691844 2026-03-10T13:06:50.582 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -- ceph osd last-stat-seq osd.7 2026-03-10T13:06:50.621 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:06:50 vm08 bash[64057]: Getting image source signatures 2026-03-10T13:06:50.621 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:06:50 vm08 bash[64057]: Copying blob sha256:b5db1e299295edf3005515ab7879c1df64a33c185d3a7a23aa4dcaa17d26f7b3 2026-03-10T13:06:50.621 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:06:50 vm08 bash[64057]: Copying blob sha256:aa2a8d90b84cb2a9c422e7005cd166a008ccf22ef5d7d4f07128478585ce35ea 2026-03-10T13:06:50.621 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:06:50 vm08 bash[64057]: Copying blob sha256:b45d31ee2d7f9f452678a85b0c837c29e12089f31ee8dbac6c8c24dfa4054a30 2026-03-10T13:06:50.646 INFO:teuthology.orchestra.run.vm00.stdout:128849018888 2026-03-10T13:06:50.647 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -- ceph osd last-stat-seq osd.5 2026-03-10T13:06:50.697 INFO:teuthology.orchestra.run.vm00.stdout:90194313227 2026-03-10T13:06:50.697 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -- ceph osd last-stat-seq osd.3 2026-03-10T13:06:50.723 INFO:teuthology.orchestra.run.vm00.stdout:64424509453 2026-03-10T13:06:50.723 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -- ceph osd last-stat-seq osd.2 2026-03-10T13:06:50.757 INFO:teuthology.orchestra.run.vm00.stdout:107374182409 2026-03-10T13:06:50.757 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -- ceph osd last-stat-seq osd.4 2026-03-10T13:06:50.777 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:50 vm00 ceph-mon[47364]: pgmap v6: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:06:50.777 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:50 vm00 ceph-mon[51670]: pgmap v6: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:06:50.851 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config 
/var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/mon.a/config 2026-03-10T13:06:50.917 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/mon.a/config 2026-03-10T13:06:51.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:50 vm08 ceph-mon[49535]: pgmap v6: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:06:51.522 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:06:51 vm08 bash[64057]: Copying config sha256:1dbe0e931976487e20e5cfb272087e08a9779c88fd5e9617ed7042dd9751ec26 2026-03-10T13:06:51.522 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:06:51 vm08 bash[64057]: Writing manifest to image destination 2026-03-10T13:06:51.522 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:06:51 vm08 podman[64057]: 2026-03-10 13:06:51.095933664 +0000 UTC m=+2.017571591 image pull 1dbe0e931976487e20e5cfb272087e08a9779c88fd5e9617ed7042dd9751ec26 quay.io/prometheus/node-exporter:v1.3.1 2026-03-10T13:06:51.522 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:06:51 vm08 podman[64057]: 2026-03-10 13:06:51.101919474 +0000 UTC m=+2.023557391 container create d5ba7ccd220b2d9fddc9e021137534a1e6cf2e404e9fefd74db924d1cc76345a (image=quay.io/prometheus/node-exporter:v1.3.1, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b, maintainer=The Prometheus Authors ) 2026-03-10T13:06:51.522 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:06:51 vm08 podman[64057]: 2026-03-10 13:06:51.129596079 +0000 UTC m=+2.051233996 container init d5ba7ccd220b2d9fddc9e021137534a1e6cf2e404e9fefd74db924d1cc76345a (image=quay.io/prometheus/node-exporter:v1.3.1, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b, maintainer=The Prometheus Authors ) 2026-03-10T13:06:51.522 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:06:51 vm08 podman[64057]: 2026-03-10 13:06:51.132613109 +0000 UTC m=+2.054251026 container start d5ba7ccd220b2d9fddc9e021137534a1e6cf2e404e9fefd74db924d1cc76345a (image=quay.io/prometheus/node-exporter:v1.3.1, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b, maintainer=The Prometheus Authors ) 2026-03-10T13:06:51.522 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:06:51 vm08 bash[64057]: d5ba7ccd220b2d9fddc9e021137534a1e6cf2e404e9fefd74db924d1cc76345a 2026-03-10T13:06:51.522 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:06:51 vm08 systemd[1]: Started Ceph node-exporter.b for 98a3dada-1c81-11f1-89c9-d57c120f78d5. 
2026-03-10T13:06:51.522 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:06:51 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[64111]: ts=2026-03-10T13:06:51.143Z caller=node_exporter.go:182 level=info msg="Starting node_exporter" version="(version=1.3.1, branch=HEAD, revision=a2321e7b940ddcff26873612bccdf7cd4c42b6b6)" 2026-03-10T13:06:51.522 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:06:51 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[64111]: ts=2026-03-10T13:06:51.144Z caller=node_exporter.go:183 level=info msg="Build context" build_context="(go=go1.17.3, user=root@243aafa5525c, date=20211205-11:09:49)" 2026-03-10T13:06:51.522 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:06:51 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[64111]: ts=2026-03-10T13:06:51.144Z caller=filesystem_common.go:111 level=info collector=filesystem msg="Parsed flag --collector.filesystem.mount-points-exclude" flag=^/(dev|proc|run/credentials/.+|sys|var/lib/docker/.+)($|/) 2026-03-10T13:06:51.522 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:06:51 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[64111]: ts=2026-03-10T13:06:51.144Z caller=filesystem_common.go:113 level=info collector=filesystem msg="Parsed flag --collector.filesystem.fs-types-exclude" flag=^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$ 2026-03-10T13:06:51.522 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:06:51 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[64111]: ts=2026-03-10T13:06:51.144Z caller=node_exporter.go:108 level=info msg="Enabled collectors" 2026-03-10T13:06:51.522 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:06:51 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[64111]: ts=2026-03-10T13:06:51.144Z caller=node_exporter.go:115 level=info collector=arp 2026-03-10T13:06:51.522 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:06:51 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[64111]: ts=2026-03-10T13:06:51.144Z caller=node_exporter.go:115 level=info collector=bcache 2026-03-10T13:06:51.522 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:06:51 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[64111]: ts=2026-03-10T13:06:51.144Z caller=node_exporter.go:115 level=info collector=bonding 2026-03-10T13:06:51.522 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:06:51 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[64111]: ts=2026-03-10T13:06:51.145Z caller=node_exporter.go:115 level=info collector=btrfs 2026-03-10T13:06:51.522 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:06:51 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[64111]: ts=2026-03-10T13:06:51.145Z caller=node_exporter.go:115 level=info collector=conntrack 2026-03-10T13:06:51.522 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:06:51 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[64111]: ts=2026-03-10T13:06:51.145Z caller=node_exporter.go:115 level=info collector=cpu 2026-03-10T13:06:51.522 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:06:51 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[64111]: ts=2026-03-10T13:06:51.145Z caller=node_exporter.go:115 level=info collector=cpufreq 2026-03-10T13:06:51.522 
INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:06:51 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[64111]: ts=2026-03-10T13:06:51.145Z caller=node_exporter.go:115 level=info collector=diskstats 2026-03-10T13:06:51.522 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:06:51 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[64111]: ts=2026-03-10T13:06:51.145Z caller=node_exporter.go:115 level=info collector=dmi 2026-03-10T13:06:51.522 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:06:51 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[64111]: ts=2026-03-10T13:06:51.145Z caller=node_exporter.go:115 level=info collector=edac 2026-03-10T13:06:51.522 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:06:51 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[64111]: ts=2026-03-10T13:06:51.145Z caller=node_exporter.go:115 level=info collector=entropy 2026-03-10T13:06:51.522 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:06:51 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[64111]: ts=2026-03-10T13:06:51.145Z caller=node_exporter.go:115 level=info collector=fibrechannel 2026-03-10T13:06:51.522 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:06:51 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[64111]: ts=2026-03-10T13:06:51.145Z caller=node_exporter.go:115 level=info collector=filefd 2026-03-10T13:06:51.522 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:06:51 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[64111]: ts=2026-03-10T13:06:51.145Z caller=node_exporter.go:115 level=info collector=filesystem 2026-03-10T13:06:51.522 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:06:51 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[64111]: ts=2026-03-10T13:06:51.145Z caller=node_exporter.go:115 level=info collector=hwmon 2026-03-10T13:06:51.522 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:06:51 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[64111]: ts=2026-03-10T13:06:51.145Z caller=node_exporter.go:115 level=info collector=infiniband 2026-03-10T13:06:51.522 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:06:51 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[64111]: ts=2026-03-10T13:06:51.145Z caller=node_exporter.go:115 level=info collector=ipvs 2026-03-10T13:06:51.522 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:06:51 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[64111]: ts=2026-03-10T13:06:51.145Z caller=node_exporter.go:115 level=info collector=loadavg 2026-03-10T13:06:51.522 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:06:51 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[64111]: ts=2026-03-10T13:06:51.145Z caller=node_exporter.go:115 level=info collector=mdadm 2026-03-10T13:06:51.522 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:06:51 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[64111]: ts=2026-03-10T13:06:51.145Z caller=node_exporter.go:115 level=info collector=meminfo 2026-03-10T13:06:51.522 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:06:51 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[64111]: ts=2026-03-10T13:06:51.145Z caller=node_exporter.go:115 level=info collector=netclass 2026-03-10T13:06:51.522 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:06:51 vm08 
ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[64111]: ts=2026-03-10T13:06:51.145Z caller=node_exporter.go:115 level=info collector=netdev 2026-03-10T13:06:51.522 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:06:51 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[64111]: ts=2026-03-10T13:06:51.145Z caller=node_exporter.go:115 level=info collector=netstat 2026-03-10T13:06:51.522 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:06:51 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[64111]: ts=2026-03-10T13:06:51.145Z caller=node_exporter.go:115 level=info collector=nfs 2026-03-10T13:06:51.522 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:06:51 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[64111]: ts=2026-03-10T13:06:51.145Z caller=node_exporter.go:115 level=info collector=nfsd 2026-03-10T13:06:51.523 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:06:51 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[64111]: ts=2026-03-10T13:06:51.145Z caller=node_exporter.go:115 level=info collector=nvme 2026-03-10T13:06:51.523 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:06:51 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[64111]: ts=2026-03-10T13:06:51.145Z caller=node_exporter.go:115 level=info collector=os 2026-03-10T13:06:51.523 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:06:51 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[64111]: ts=2026-03-10T13:06:51.145Z caller=node_exporter.go:115 level=info collector=powersupplyclass 2026-03-10T13:06:51.523 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:06:51 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[64111]: ts=2026-03-10T13:06:51.145Z caller=node_exporter.go:115 level=info collector=pressure 2026-03-10T13:06:51.523 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:06:51 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[64111]: ts=2026-03-10T13:06:51.145Z caller=node_exporter.go:115 level=info collector=rapl 2026-03-10T13:06:51.523 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:06:51 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[64111]: ts=2026-03-10T13:06:51.145Z caller=node_exporter.go:115 level=info collector=schedstat 2026-03-10T13:06:51.523 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:06:51 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[64111]: ts=2026-03-10T13:06:51.145Z caller=node_exporter.go:115 level=info collector=sockstat 2026-03-10T13:06:51.523 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:06:51 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[64111]: ts=2026-03-10T13:06:51.145Z caller=node_exporter.go:115 level=info collector=softnet 2026-03-10T13:06:51.523 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:06:51 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[64111]: ts=2026-03-10T13:06:51.145Z caller=node_exporter.go:115 level=info collector=stat 2026-03-10T13:06:51.523 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:06:51 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[64111]: ts=2026-03-10T13:06:51.145Z caller=node_exporter.go:115 level=info collector=tapestats 2026-03-10T13:06:51.523 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:06:51 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[64111]: ts=2026-03-10T13:06:51.145Z caller=node_exporter.go:115 
level=info collector=textfile 2026-03-10T13:06:51.523 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:06:51 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[64111]: ts=2026-03-10T13:06:51.145Z caller=node_exporter.go:115 level=info collector=thermal_zone 2026-03-10T13:06:51.523 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:06:51 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[64111]: ts=2026-03-10T13:06:51.145Z caller=node_exporter.go:115 level=info collector=time 2026-03-10T13:06:51.523 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:06:51 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[64111]: ts=2026-03-10T13:06:51.145Z caller=node_exporter.go:115 level=info collector=udp_queues 2026-03-10T13:06:51.523 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:06:51 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[64111]: ts=2026-03-10T13:06:51.145Z caller=node_exporter.go:115 level=info collector=uname 2026-03-10T13:06:51.523 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:06:51 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[64111]: ts=2026-03-10T13:06:51.145Z caller=node_exporter.go:115 level=info collector=vmstat 2026-03-10T13:06:51.523 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:06:51 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[64111]: ts=2026-03-10T13:06:51.145Z caller=node_exporter.go:115 level=info collector=xfs 2026-03-10T13:06:51.523 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:06:51 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[64111]: ts=2026-03-10T13:06:51.145Z caller=node_exporter.go:115 level=info collector=zfs 2026-03-10T13:06:51.523 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:06:51 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[64111]: ts=2026-03-10T13:06:51.145Z caller=node_exporter.go:199 level=info msg="Listening on" address=:9100 2026-03-10T13:06:51.523 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:06:51 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[64111]: ts=2026-03-10T13:06:51.146Z caller=tls_config.go:195 level=info msg="TLS is disabled." 
http2=false 2026-03-10T13:06:51.589 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/mon.a/config 2026-03-10T13:06:51.590 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/mon.a/config 2026-03-10T13:06:51.596 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/mon.a/config 2026-03-10T13:06:51.603 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/mon.a/config 2026-03-10T13:06:51.607 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/mon.a/config 2026-03-10T13:06:51.622 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/mon.a/config 2026-03-10T13:06:51.684 INFO:teuthology.orchestra.run.vm00.stdout:47244640269 2026-03-10T13:06:51.910 INFO:tasks.cephadm.ceph_manager.ceph:need seq 47244640271 got 47244640269 for osd.1 2026-03-10T13:06:52.120 INFO:teuthology.orchestra.run.vm00.stdout:34359738383 2026-03-10T13:06:52.272 INFO:tasks.cephadm.ceph_manager.ceph:need seq 34359738385 got 34359738383 for osd.0 2026-03-10T13:06:52.342 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:52 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:06:52.342 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:52 vm00 ceph-mon[47364]: Deploying daemon prometheus.a on vm08 2026-03-10T13:06:52.342 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:52 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/1481164602' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 1}]: dispatch 2026-03-10T13:06:52.342 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:52 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/312499509' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 0}]: dispatch 2026-03-10T13:06:52.342 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:52 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:06:52.342 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:52 vm00 ceph-mon[51670]: Deploying daemon prometheus.a on vm08 2026-03-10T13:06:52.342 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:52 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/1481164602' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 1}]: dispatch 2026-03-10T13:06:52.342 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:52 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/312499509' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 0}]: dispatch 2026-03-10T13:06:52.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:52 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:06:52.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:52 vm08 ceph-mon[49535]: Deploying daemon prometheus.a on vm08 2026-03-10T13:06:52.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:52 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/1481164602' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 1}]: dispatch 2026-03-10T13:06:52.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:52 vm08 ceph-mon[49535]: from='client.? 
192.168.123.100:0/312499509' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 0}]: dispatch 2026-03-10T13:06:52.853 INFO:teuthology.orchestra.run.vm00.stdout:107374182409 2026-03-10T13:06:52.865 INFO:teuthology.orchestra.run.vm00.stdout:150323855366 2026-03-10T13:06:52.885 INFO:teuthology.orchestra.run.vm00.stdout:64424509453 2026-03-10T13:06:52.886 INFO:teuthology.orchestra.run.vm00.stdout:128849018888 2026-03-10T13:06:52.910 INFO:teuthology.orchestra.run.vm00.stdout:171798691844 2026-03-10T13:06:52.910 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -- ceph osd last-stat-seq osd.1 2026-03-10T13:06:53.066 INFO:teuthology.orchestra.run.vm00.stdout:90194313227 2026-03-10T13:06:53.169 INFO:tasks.cephadm.ceph_manager.ceph:need seq 128849018888 got 128849018888 for osd.5 2026-03-10T13:06:53.169 DEBUG:teuthology.parallel:result is None 2026-03-10T13:06:53.187 INFO:tasks.cephadm.ceph_manager.ceph:need seq 171798691844 got 171798691844 for osd.7 2026-03-10T13:06:53.187 DEBUG:teuthology.parallel:result is None 2026-03-10T13:06:53.187 INFO:tasks.cephadm.ceph_manager.ceph:need seq 150323855366 got 150323855366 for osd.6 2026-03-10T13:06:53.187 DEBUG:teuthology.parallel:result is None 2026-03-10T13:06:53.225 INFO:tasks.cephadm.ceph_manager.ceph:need seq 107374182409 got 107374182409 for osd.4 2026-03-10T13:06:53.226 DEBUG:teuthology.parallel:result is None 2026-03-10T13:06:53.233 INFO:tasks.cephadm.ceph_manager.ceph:need seq 64424509453 got 64424509453 for osd.2 2026-03-10T13:06:53.234 DEBUG:teuthology.parallel:result is None 2026-03-10T13:06:53.235 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/mon.a/config 2026-03-10T13:06:53.242 INFO:tasks.cephadm.ceph_manager.ceph:need seq 90194313227 got 90194313227 for osd.3 2026-03-10T13:06:53.242 DEBUG:teuthology.parallel:result is None 2026-03-10T13:06:53.273 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -- ceph osd last-stat-seq osd.0 2026-03-10T13:06:53.379 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:53 vm00 ceph-mon[47364]: pgmap v7: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:06:53.379 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:53 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/3340009084' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 4}]: dispatch 2026-03-10T13:06:53.379 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:53 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/874652582' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 6}]: dispatch 2026-03-10T13:06:53.379 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:53 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/3304748593' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 2}]: dispatch 2026-03-10T13:06:53.379 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:53 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/2783112194' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 5}]: dispatch 2026-03-10T13:06:53.379 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:53 vm00 ceph-mon[47364]: from='client.? 
192.168.123.100:0/20814965' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 7}]: dispatch 2026-03-10T13:06:53.379 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:53 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/2321176682' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 3}]: dispatch 2026-03-10T13:06:53.379 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:53 vm00 ceph-mon[51670]: pgmap v7: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:06:53.379 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:53 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/3340009084' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 4}]: dispatch 2026-03-10T13:06:53.379 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:53 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/874652582' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 6}]: dispatch 2026-03-10T13:06:53.379 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:53 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/3304748593' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 2}]: dispatch 2026-03-10T13:06:53.379 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:53 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/2783112194' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 5}]: dispatch 2026-03-10T13:06:53.379 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:53 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/20814965' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 7}]: dispatch 2026-03-10T13:06:53.379 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:53 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/2321176682' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 3}]: dispatch 2026-03-10T13:06:53.510 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/mon.a/config 2026-03-10T13:06:53.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:53 vm08 ceph-mon[49535]: pgmap v7: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:06:53.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:53 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/3340009084' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 4}]: dispatch 2026-03-10T13:06:53.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:53 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/874652582' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 6}]: dispatch 2026-03-10T13:06:53.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:53 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/3304748593' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 2}]: dispatch 2026-03-10T13:06:53.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:53 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/2783112194' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 5}]: dispatch 2026-03-10T13:06:53.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:53 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/20814965' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 7}]: dispatch 2026-03-10T13:06:53.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:53 vm08 ceph-mon[49535]: from='client.? 
192.168.123.100:0/2321176682' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 3}]: dispatch 2026-03-10T13:06:53.668 INFO:teuthology.orchestra.run.vm00.stdout:47244640271 2026-03-10T13:06:53.744 INFO:tasks.cephadm.ceph_manager.ceph:need seq 47244640271 got 47244640271 for osd.1 2026-03-10T13:06:53.744 DEBUG:teuthology.parallel:result is None 2026-03-10T13:06:53.884 INFO:teuthology.orchestra.run.vm00.stdout:34359738385 2026-03-10T13:06:53.933 INFO:tasks.cephadm.ceph_manager.ceph:need seq 34359738385 got 34359738385 for osd.0 2026-03-10T13:06:53.933 DEBUG:teuthology.parallel:result is None 2026-03-10T13:06:53.933 INFO:tasks.cephadm.ceph_manager.ceph:waiting for clean 2026-03-10T13:06:53.933 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -- ceph pg dump --format=json 2026-03-10T13:06:54.094 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/mon.a/config 2026-03-10T13:06:54.469 INFO:teuthology.orchestra.run.vm00.stdout: 2026-03-10T13:06:54.470 INFO:teuthology.orchestra.run.vm00.stderr:dumped all 2026-03-10T13:06:54.517 INFO:teuthology.orchestra.run.vm00.stdout:{"pg_ready":true,"pg_map":{"version":8,"stamp":"2026-03-10T13:06:53.570366+0000","last_osdmap_epoch":0,"last_pg_scan":0,"pg_stats_sum":{"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":192,"num_read_kb":288,"num_write":133,"num_write_kb":1372,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":397840,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":87,"ondisk_log_size":87,"up":3,"acting":3,"num_store_stats":0},"osd_stats_sum":{"up_from":0,"seq":0,"num_pgs":3,"num_osds":8,"num_per_pool_osds":3,"num_per_pool_omap_osds":3,"kb":167739392,"kb_used":48524,"kb_used_data":4428,"kb_used_omap":0,"kb_used_meta":44032,"kb_avail":167690868,"statfs":{"total":171765137408,"available":171715448832,"internally_reserved":0,"allocated":4534272,"data_stored":2572115,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":45088768},"hb_peers":[],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[]},"pg_stats_delta":{"stat_sum":{"num_bytes":0,"num_objects":0,"num_object_clones":0,"num_object_copies":0,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_obj
ects_unfound":0,"num_objects_dirty":0,"num_whiteouts":0,"num_read":0,"num_read_kb":0,"num_write":0,"num_write_kb":0,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":0,"num_bytes_recovered":0,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":0,"ondisk_log_size":0,"up":0,"acting":0,"num_store_stats":0,"stamp_delta":"8.974978"},"pg_stats":[{"pgid":"1.0","version":"43'87","reported_seq":56,"reported_epoch":43,"state":"active+clean","last_fresh":"2026-03-10T13:06:43.688330+0000","last_change":"2026-03-10T13:06:37.892806+0000","last_active":"2026-03-10T13:06:43.688330+0000","last_peered":"2026-03-10T13:06:43.688330+0000","last_clean":"2026-03-10T13:06:43.688330+0000","last_became_active":"2026-03-10T13:06:37.586821+0000","last_became_peered":"2026-03-10T13:06:37.586821+0000","last_unstale":"2026-03-10T13:06:43.688330+0000","last_undegraded":"2026-03-10T13:06:43.688330+0000","last_fullsized":"2026-03-10T13:06:43.688330+0000","mapping_epoch":41,"log_start":"0'0","ondisk_log_start":"0'0","created":16,"last_epoch_clean":42,"parent":"0.0","parent_split_bits":0,"last_scrub":"0'0","last_scrub_stamp":"2026-03-10T13:05:54.165705+0000","last_deep_scrub":"0'0","last_deep_scrub_stamp":"2026-03-10T13:05:54.165705+0000","last_clean_scrub_stamp":"2026-03-10T13:05:54.165705+0000","objects_scrubbed":0,"log_size":87,"ondisk_log_size":87,"stats_invalid":false,"dirty_stats_invalid":false,"omap_stats_invalid":false,"hitset_stats_invalid":false,"hitset_bytes_stats_invalid":false,"pin_stats_invalid":false,"manifest_stats_invalid":false,"snaptrimq_len":0,"last_scrub_duration":0,"scrub_schedule":"periodic scrub scheduled @ 
2026-03-11T23:01:36.232346+0000","scrub_duration":0,"objects_trimmed":0,"snaptrim_duration":0,"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":192,"num_read_kb":288,"num_write":133,"num_write_kb":1372,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":397840,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"up":[7,0,6],"acting":[7,0,6],"avail_no_missing":[],"object_location_counts":[],"blocked_by":[],"up_primary":7,"acting_primary":7,"purged_snaps":[]}],"pool_stats":[{"poolid":1,"num_pg":1,"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":192,"num_read_kb":288,"num_write":133,"num_write_kb":1372,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":397840,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":1204224,"data_stored":1193520,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":87,"ondisk_log_size":87,"up":3,"acting":3,"num_store_stats":3}],"osd_stats":[{"osd":7,"up_from":40,"seq":171798691844,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":6056,"kb_used_data":800,"kb_used_omap":0,"kb_used_meta":5248,"kb_avail":20961368,"statfs":{"total":21470642176,"available":21464440832,"internally_reserved":0,"allocated":819200,"data_stored":570271,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5373952},"hb_peers":[0,1,2,3,4,5,6],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.64200000000000002}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.84199999999999997}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.91700000000000004}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.624}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.71599999999999997}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.65900000000000003}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.69199999999999995}]}]},{"osd":6,"up_from":35,"seq":150323855366,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":6052,"kb_used_data":796,"kb_used_omap":0,"kb_used_meta":5248,"kb_avail":20961372,"statfs":{"total":21470642176,"available":21464444928,"internally_reserved":0,"allocated":815104,"data_stored":570006,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5373952},"hb_peers":[0,1,2,3,4,5,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.67800000000000005}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.70299999999999996}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.46800000000000003}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.71899999999999997}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.60199999999999998}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.42599999999999999}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.77500000000000002}]}]},{"osd":1,"up_from":11,"seq":47244640271,"num_pgs":0,"num_osds":1,"num_per_pool_osds":0,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":6304,"kb_used_data":408,"kb_used_omap":0,"kb_used_meta":5888,"kb_avail":20961120,"statfs":{"total":21470642176,"available":21464186880,"internally_reserved":0,"allocated":417792,"data_stored":172431,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":6029312},"hb_peers":[0,2,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Tue Mar 10 13:06:42 2026","interfaces":[{"interface":"back","average":{"1min":0.46600000000000003,"5min":0.46600000000000003,"15min":0.46600000000000003},"min":{"1min":0.221,"5min":0.221,"15min":0.221},"max":{"1min":1.3240000000000001,"5min":1.3240000000000001,"15min":1.3240000000000001},"last":0.83499999999999996},{"interface":"front","average":{"1min":0.47499999999999998,"5min":0.47499999999999998,"15min":0.47499999999999998},"min":{"1min":0.22900000000000001,"5min":0.22900000000000001,"15min":0.22900000000000001},"max":{"1min":1.3129999999999999,"5min":1.3129999999999999,"15min":1.3129999999999999},"last":0.91600000000000004}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.84699999999999998}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.95699999999999996}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.93100000000000005}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.93799999999999994}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.82499999999999996}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":1.0329999999999999}]}]},{"osd":0,"up_from":8,"seq":34359738385,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":6696,"kb_used_data":800,"kb_used_omap":0,"kb_used_meta":5888,"kb_avail":20960728,"statfs":{"total":21470642176,"available":21463785472,"internally_reserved":0,"allocated":819200,"data_stored":570271,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":6029312},"hb_peers":[1,2,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":1,"last update":"Tue Mar 10 13:06:42 2026","interfaces":[{"interface":"back","average":{"1min":0.63100000000000001,"5min":0.63100000000000001,"15min":0.63100000000000001},"min":{"1min":0.22900000000000001,"5min":0.22900000000000001,"15min":0.22900000000000001},"max":{"1min":2.1760000000000002,"5min":2.1760000000000002,"15min":2.1760000000000002},"last":0.627},{"interface":"front","average":{"1min":0.501,"5min":0.501,"15min":0.501},"min":{"1min":0.27800000000000002,"5min":0.27800000000000002,"15min":0.27800000000000002},"max":{"1min":1.0489999999999999,"5min":1.0489999999999999,"15min":1.0489999999999999},"last":0.68100000000000005}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.70199999999999996}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.59299999999999997}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.66700000000000004}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.61099999999999999}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.64400000000000002}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.72899999999999998}]}]},{"osd":2,"up_from":15,"seq":64424509453,"num_pgs":0,"num_osds":1,"num_per_pool_osds":0,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":6236,"kb_used_data":404,"kb_used_omap":0,"kb_used_meta":5824,"kb_avail":20961188,"statfs":{"total":21470642176,"available":21464256512,"internally_reserved":0,"allocated":413696,"data_stored":172137,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5963776},"hb_peers":[0,1,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.85799999999999998}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.71299999999999997}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.49199999999999999}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.90900000000000003}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.89900000000000002}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.77700000000000002}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.85999999999999999}]}]},{"osd":3,"up_from":21,"seq":90194313227,"num_pgs":0,"num_osds":1,"num_per_pool_osds":0,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":5728,"kb_used_data":408,"kb_used_omap":0,"kb_used_meta":5312,"kb_avail":20961696,"statfs":{"total":21470642176,"available":21464776704,"internally_reserved":0,"allocated":417792,"data_stored":172431,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5439488},"hb_peers":[0,1,2,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.64400000000000002}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.55400000000000005}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.78300000000000003}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.72599999999999998}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.77500000000000002}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.67600000000000005}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.69999999999999996}]}]},{"osd":4,"up_from":25,"seq":107374182409,"num_pgs":0,"num_osds":1,"num_per_pool_osds":0,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":5728,"kb_used_data":408,"kb_used_omap":0,"kb_used_meta":5312,"kb_avail":20961696,"statfs":{"total":21470642176,"available":21464776704,"internally_reserved":0,"allocated":417792,"data_stored":172431,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5439488},"hb_peers":[0,1,2,3,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.67300000000000004}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.35399999999999998}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.70699999999999996}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.51400000000000001}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.65800000000000003}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.56100000000000005}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.32600000000000001}]}]},{"osd":5,"up_from":30,"seq":128849018888,"num_pgs":0,"num_osds":1,"num_per_pool_osds":0,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":5724,"kb_used_data":404,"kb_used_omap":0,"kb_used_meta":5312,"kb_avail":20961700,"statfs":{"total":21470642176,"available":21464780800,"internally_reserved":0,"allocated":413696,"data_stored":172137,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5439488},"hb_peers":[0,1,2,3,4,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.59699999999999998}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.67300000000000004}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.57899999999999996}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.502}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.70199999999999996}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.61799999999999999}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.63300000000000001}]}]}],"pool_statfs":[{"poolid":1,"osd":0,"total":0,"available":0,"internally_reserved":0,"allocated":401408,"data_stored":397840,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":6,"total":0,"available":0,"internally_reserved":0,"allocated":401408,"data_stored":397840,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":7,"total":0,"available":0,"internally_reserved":0,"allocated":401408,"data_stored":397840,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0}]}} 2026-03-10T13:06:54.517 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -- ceph pg dump --format=json 2026-03-10T13:06:54.673 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config 
/var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/mon.a/config 2026-03-10T13:06:54.728 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:54 vm00 ceph-mon[47364]: pgmap v8: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:06:54.729 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:54 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:06:54.729 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:54 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/2860563482' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 1}]: dispatch 2026-03-10T13:06:54.729 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:54 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/501270293' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 0}]: dispatch 2026-03-10T13:06:54.729 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:54 vm00 ceph-mon[47364]: from='client.24442 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T13:06:54.729 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:54 vm00 ceph-mon[51670]: pgmap v8: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:06:54.729 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:54 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:06:54.729 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:54 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/2860563482' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 1}]: dispatch 2026-03-10T13:06:54.729 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:54 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/501270293' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 0}]: dispatch 2026-03-10T13:06:54.729 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:54 vm00 ceph-mon[51670]: from='client.24442 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T13:06:54.905 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:54 vm08 ceph-mon[49535]: pgmap v8: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:06:54.905 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:54 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:06:54.905 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:54 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/2860563482' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 1}]: dispatch 2026-03-10T13:06:54.905 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:54 vm08 ceph-mon[49535]: from='client.? 
192.168.123.100:0/501270293' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 0}]: dispatch 2026-03-10T13:06:54.905 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:54 vm08 ceph-mon[49535]: from='client.24442 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T13:06:55.010 INFO:teuthology.orchestra.run.vm00.stdout: 2026-03-10T13:06:55.013 INFO:teuthology.orchestra.run.vm00.stderr:dumped all 2026-03-10T13:06:55.062 INFO:teuthology.orchestra.run.vm00.stdout:{"pg_ready":true,"pg_map":{"version":8,"stamp":"2026-03-10T13:06:53.570366+0000","last_osdmap_epoch":0,"last_pg_scan":0,"pg_stats_sum":{"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":192,"num_read_kb":288,"num_write":133,"num_write_kb":1372,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":397840,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":87,"ondisk_log_size":87,"up":3,"acting":3,"num_store_stats":0},"osd_stats_sum":{"up_from":0,"seq":0,"num_pgs":3,"num_osds":8,"num_per_pool_osds":3,"num_per_pool_omap_osds":3,"kb":167739392,"kb_used":48524,"kb_used_data":4428,"kb_used_omap":0,"kb_used_meta":44032,"kb_avail":167690868,"statfs":{"total":171765137408,"available":171715448832,"internally_reserved":0,"allocated":4534272,"data_stored":2572115,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":45088768},"hb_peers":[],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[]},"pg_stats_delta":{"stat_sum":{"num_bytes":0,"num_objects":0,"num_object_clones":0,"num_object_copies":0,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":0,"num_whiteouts":0,"num_read":0,"num_read_kb":0,"num_write":0,"num_write_kb":0,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":0,"num_bytes_recovered":0,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"ava
ilable":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":0,"ondisk_log_size":0,"up":0,"acting":0,"num_store_stats":0,"stamp_delta":"8.974978"},"pg_stats":[{"pgid":"1.0","version":"43'87","reported_seq":56,"reported_epoch":43,"state":"active+clean","last_fresh":"2026-03-10T13:06:43.688330+0000","last_change":"2026-03-10T13:06:37.892806+0000","last_active":"2026-03-10T13:06:43.688330+0000","last_peered":"2026-03-10T13:06:43.688330+0000","last_clean":"2026-03-10T13:06:43.688330+0000","last_became_active":"2026-03-10T13:06:37.586821+0000","last_became_peered":"2026-03-10T13:06:37.586821+0000","last_unstale":"2026-03-10T13:06:43.688330+0000","last_undegraded":"2026-03-10T13:06:43.688330+0000","last_fullsized":"2026-03-10T13:06:43.688330+0000","mapping_epoch":41,"log_start":"0'0","ondisk_log_start":"0'0","created":16,"last_epoch_clean":42,"parent":"0.0","parent_split_bits":0,"last_scrub":"0'0","last_scrub_stamp":"2026-03-10T13:05:54.165705+0000","last_deep_scrub":"0'0","last_deep_scrub_stamp":"2026-03-10T13:05:54.165705+0000","last_clean_scrub_stamp":"2026-03-10T13:05:54.165705+0000","objects_scrubbed":0,"log_size":87,"ondisk_log_size":87,"stats_invalid":false,"dirty_stats_invalid":false,"omap_stats_invalid":false,"hitset_stats_invalid":false,"hitset_bytes_stats_invalid":false,"pin_stats_invalid":false,"manifest_stats_invalid":false,"snaptrimq_len":0,"last_scrub_duration":0,"scrub_schedule":"periodic scrub scheduled @ 2026-03-11T23:01:36.232346+0000","scrub_duration":0,"objects_trimmed":0,"snaptrim_duration":0,"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":192,"num_read_kb":288,"num_write":133,"num_write_kb":1372,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":397840,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"up":[7,0,6],"acting":[7,0,6],"avail_no_missing":[],"object_location_counts":[],"blocked_by":[],"up_primary":7,"acting_primary":7,"purged_snaps":[]}],"pool_stats":[{"poolid":1,"num_pg":1,"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":192,"num_read_kb":288,"num_write":133,"num_write_kb":1372,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":397840,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"n
um_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":1204224,"data_stored":1193520,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":87,"ondisk_log_size":87,"up":3,"acting":3,"num_store_stats":3}],"osd_stats":[{"osd":7,"up_from":40,"seq":171798691844,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":6056,"kb_used_data":800,"kb_used_omap":0,"kb_used_meta":5248,"kb_avail":20961368,"statfs":{"total":21470642176,"available":21464440832,"internally_reserved":0,"allocated":819200,"data_stored":570271,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5373952},"hb_peers":[0,1,2,3,4,5,6],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.64200000000000002}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.84199999999999997}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.91700000000000004}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.624}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.71599999999999997}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.65900000000000003}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.69199999999999995}]}]},{"osd":6,"up_from":35,"seq":150323855366,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":6052,"kb_used_data":796,"kb_used_omap":0,"kb_used_meta":5248,"kb_avail":20961372,"statfs":{"total":21470642176,"available":21464444928,"internally_reserved":0,"allocated":815104,"data_stored":570006,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5373952},"hb_peers":[0,1,2,3,4,5,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.67800000000000005}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.70299999999999996}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.46800000000000003}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.71899999999999997}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.60199999999999998}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.42599999999999999}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.77500000000000002}]}]},{"osd":1,"up_from":11,"seq":47244640271,"num_pgs":0,"num_osds":1,"num_per_pool_osds":0,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":6304,"kb_used_data":408,"kb_used_omap":0,"kb_used_meta":5888,"kb_avail":20961120,"statfs":{"total":21470642176,"available":21464186880,"internally_reserved":0,"allocated":417792,"data_stored":172431,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":6029312},"hb_peers":[0,2,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Tue Mar 10 13:06:42 2026","interfaces":[{"interface":"back","average":{"1min":0.46600000000000003,"5min":0.46600000000000003,"15min":0.46600000000000003},"min":{"1min":0.221,"5min":0.221,"15min":0.221},"max":{"1min":1.3240000000000001,"5min":1.3240000000000001,"15min":1.3240000000000001},"last":0.83499999999999996},{"interface":"front","average":{"1min":0.47499999999999998,"5min":0.47499999999999998,"15min":0.47499999999999998},"min":{"1min":0.22900000000000001,"5min":0.22900000000000001,"15min":0.22900000000000001},"max":{"1min":1.3129999999999999,"5min":1.3129999999999999,"15min":1.3129999999999999},"last":0.91600000000000004}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.84699999999999998}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.95699999999999996}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.93100000000000005}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.93799999999999994}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.82499999999999996}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":1.0329999999999999}]}]},{"osd":0,"up_from":8,"seq":34359738385,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":6696,"kb_used_data":800,"kb_used_omap":0,"kb_used_meta":5888,"kb_avail":20960728,"statfs":{"total":21470642176,"available":21463785472,"internally_reserved":0,"allocated":819200,"data_stored":570271,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":6029312},"hb_peers":[1,2,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":1,"last update":"Tue Mar 10 13:06:42 2026","interfaces":[{"interface":"back","average":{"1min":0.63100000000000001,"5min":0.63100000000000001,"15min":0.63100000000000001},"min":{"1min":0.22900000000000001,"5min":0.22900000000000001,"15min":0.22900000000000001},"max":{"1min":2.1760000000000002,"5min":2.1760000000000002,"15min":2.1760000000000002},"last":0.627},{"interface":"front","average":{"1min":0.501,"5min":0.501,"15min":0.501},"min":{"1min":0.27800000000000002,"5min":0.27800000000000002,"15min":0.27800000000000002},"max":{"1min":1.0489999999999999,"5min":1.0489999999999999,"15min":1.0489999999999999},"last":0.68100000000000005}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.70199999999999996}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.59299999999999997}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.66700000000000004}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.61099999999999999}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.64400000000000002}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.72899999999999998}]}]},{"osd":2,"up_from":15,"seq":64424509453,"num_pgs":0,"num_osds":1,"num_per_pool_osds":0,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":6236,"kb_used_data":404,"kb_used_omap":0,"kb_used_meta":5824,"kb_avail":20961188,"statfs":{"total":21470642176,"available":21464256512,"internally_reserved":0,"allocated":413696,"data_stored":172137,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5963776},"hb_peers":[0,1,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.85799999999999998}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.71299999999999997}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.49199999999999999}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.90900000000000003}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.89900000000000002}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.77700000000000002}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.85999999999999999}]}]},{"osd":3,"up_from":21,"seq":90194313227,"num_pgs":0,"num_osds":1,"num_per_pool_osds":0,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":5728,"kb_used_data":408,"kb_used_omap":0,"kb_used_meta":5312,"kb_avail":20961696,"statfs":{"total":21470642176,"available":21464776704,"internally_reserved":0,"allocated":417792,"data_stored":172431,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5439488},"hb_peers":[0,1,2,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.64400000000000002}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.55400000000000005}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.78300000000000003}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.72599999999999998}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.77500000000000002}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.67600000000000005}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.69999999999999996}]}]},{"osd":4,"up_from":25,"seq":107374182409,"num_pgs":0,"num_osds":1,"num_per_pool_osds":0,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":5728,"kb_used_data":408,"kb_used_omap":0,"kb_used_meta":5312,"kb_avail":20961696,"statfs":{"total":21470642176,"available":21464776704,"internally_reserved":0,"allocated":417792,"data_stored":172431,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5439488},"hb_peers":[0,1,2,3,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.67300000000000004}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.35399999999999998}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.70699999999999996}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.51400000000000001}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.65800000000000003}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.56100000000000005}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.32600000000000001}]}]},{"osd":5,"up_from":30,"seq":128849018888,"num_pgs":0,"num_osds":1,"num_per_pool_osds":0,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":5724,"kb_used_data":404,"kb_used_omap":0,"kb_used_meta":5312,"kb_avail":20961700,"statfs":{"total":21470642176,"available":21464780800,"internally_reserved":0,"allocated":413696,"data_stored":172137,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5439488},"hb_peers":[0,1,2,3,4,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.59699999999999998}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.67300000000000004}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.57899999999999996}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.502}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.70199999999999996}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.61799999999999999}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.63300000000000001}]}]}],"pool_statfs":[{"poolid":1,"osd":0,"total":0,"available":0,"internally_reserved":0,"allocated":401408,"data_stored":397840,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":6,"total":0,"available":0,"internally_reserved":0,"allocated":401408,"data_stored":397840,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":7,"total":0,"available":0,"internally_reserved":0,"allocated":401408,"data_stored":397840,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0}]}} 2026-03-10T13:06:55.063 INFO:tasks.cephadm.ceph_manager.ceph:clean! 2026-03-10T13:06:55.063 INFO:tasks.ceph:Waiting until ceph cluster ceph is healthy... 
2026-03-10T13:06:55.063 INFO:tasks.cephadm.ceph_manager.ceph:wait_until_healthy 2026-03-10T13:06:55.063 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -- ceph health --format=json 2026-03-10T13:06:55.217 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/mon.a/config 2026-03-10T13:06:55.484 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:06:55 vm08 systemd[1]: Starting Ceph prometheus.a for 98a3dada-1c81-11f1-89c9-d57c120f78d5... 2026-03-10T13:06:55.574 INFO:teuthology.orchestra.run.vm00.stdout: 2026-03-10T13:06:55.574 INFO:teuthology.orchestra.run.vm00.stdout:{"status":"HEALTH_OK","checks":{},"mutes":[]} 2026-03-10T13:06:55.635 INFO:tasks.cephadm.ceph_manager.ceph:wait_until_healthy done 2026-03-10T13:06:55.635 INFO:tasks.cephadm:Setup complete, yielding 2026-03-10T13:06:55.635 INFO:teuthology.run_tasks:Running task cephadm.shell... 2026-03-10T13:06:55.637 INFO:tasks.cephadm:Running commands on role mon.a host ubuntu@vm00.local 2026-03-10T13:06:55.637 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -- bash -c 'ceph config set mgr mgr/cephadm/use_repo_digest false --force' 2026-03-10T13:06:55.772 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:55 vm08 ceph-mon[49535]: from='client.14556 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T13:06:55.773 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:55 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:06:55.773 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:55 vm08 ceph-mon[49535]: from='client.? 
192.168.123.100:0/3279473281' entity='client.admin' cmd=[{"prefix": "health", "format": "json"}]: dispatch 2026-03-10T13:06:55.773 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:06:55 vm08 podman[64534]: 2026-03-10 13:06:55.482564944 +0000 UTC m=+0.018271422 container create e06f967e832be536370b603f85761784ec07d538abfefad4a6ab61aac018c85d (image=quay.io/prometheus/prometheus:v2.33.4, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-10T13:06:55.773 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:06:55 vm08 podman[64534]: 2026-03-10 13:06:55.510064646 +0000 UTC m=+0.045771124 container init e06f967e832be536370b603f85761784ec07d538abfefad4a6ab61aac018c85d (image=quay.io/prometheus/prometheus:v2.33.4, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-10T13:06:55.773 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:06:55 vm08 podman[64534]: 2026-03-10 13:06:55.512881852 +0000 UTC m=+0.048588330 container start e06f967e832be536370b603f85761784ec07d538abfefad4a6ab61aac018c85d (image=quay.io/prometheus/prometheus:v2.33.4, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-10T13:06:55.773 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:06:55 vm08 bash[64534]: e06f967e832be536370b603f85761784ec07d538abfefad4a6ab61aac018c85d 2026-03-10T13:06:55.773 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:06:55 vm08 podman[64534]: 2026-03-10 13:06:55.47512476 +0000 UTC m=+0.010831249 image pull 514e6a882f6e74806a5856468489eeff8d7106095557578da96935e4d0ba4d9d quay.io/prometheus/prometheus:v2.33.4 2026-03-10T13:06:55.773 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:06:55 vm08 systemd[1]: Started Ceph prometheus.a for 98a3dada-1c81-11f1-89c9-d57c120f78d5. 
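[editor's note] The HEALTH_OK entry a little earlier marks the end of the wait_until_healthy step, which repeatedly issues `ceph health --format=json` (again via `cephadm shell`) until the reported status is HEALTH_OK. A minimal sketch of such a poll, assuming direct access to the `ceph` CLI rather than the cephadm wrapper used in this run (hypothetical helper, not the teuthology implementation):

    # wait_healthy.py -- hypothetical polling helper, not part of teuthology
    import json
    import subprocess
    import time

    def wait_until_healthy(timeout=300, interval=5):
        deadline = time.time() + timeout
        while time.time() < deadline:
            out = subprocess.check_output(["ceph", "health", "--format=json"])
            if json.loads(out)["status"] == "HEALTH_OK":   # key as seen in the logged output
                return
            time.sleep(interval)
        raise TimeoutError("cluster did not reach HEALTH_OK")

    wait_until_healthy()

In the log above the very first poll already returns {"status":"HEALTH_OK","checks":{},"mutes":[]}, so the task reports "wait_until_healthy done" immediately and yields to the cephadm.shell tasks that follow.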
2026-03-10T13:06:55.773 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:06:55 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[64544]: ts=2026-03-10T13:06:55.552Z caller=main.go:475 level=info msg="No time or size retention was set so using the default time retention" duration=15d 2026-03-10T13:06:55.773 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:06:55 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[64544]: ts=2026-03-10T13:06:55.552Z caller=main.go:512 level=info msg="Starting Prometheus" version="(version=2.33.4, branch=HEAD, revision=83032011a5d3e6102624fe58241a374a7201fee8)" 2026-03-10T13:06:55.773 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:06:55 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[64544]: ts=2026-03-10T13:06:55.552Z caller=main.go:517 level=info build_context="(go=go1.17.7, user=root@d13bf69e7be8, date=20220222-16:51:28)" 2026-03-10T13:06:55.773 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:06:55 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[64544]: ts=2026-03-10T13:06:55.552Z caller=main.go:518 level=info host_details="(Linux 5.14.0-686.el9.x86_64 #1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026 x86_64 vm08 (none))" 2026-03-10T13:06:55.773 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:06:55 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[64544]: ts=2026-03-10T13:06:55.552Z caller=main.go:519 level=info fd_limits="(soft=1048576, hard=1048576)" 2026-03-10T13:06:55.773 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:06:55 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[64544]: ts=2026-03-10T13:06:55.552Z caller=main.go:520 level=info vm_limits="(soft=unlimited, hard=unlimited)" 2026-03-10T13:06:55.773 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:06:55 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[64544]: ts=2026-03-10T13:06:55.553Z caller=web.go:570 level=info component=web msg="Start listening for connections" address=:9095 2026-03-10T13:06:55.773 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:06:55 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[64544]: ts=2026-03-10T13:06:55.553Z caller=main.go:923 level=info msg="Starting TSDB ..." 2026-03-10T13:06:55.773 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:06:55 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[64544]: ts=2026-03-10T13:06:55.554Z caller=tls_config.go:195 level=info component=web msg="TLS is disabled." 
http2=false 2026-03-10T13:06:55.773 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:06:55 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[64544]: ts=2026-03-10T13:06:55.556Z caller=head.go:493 level=info component=tsdb msg="Replaying on-disk memory mappable chunks if any" 2026-03-10T13:06:55.773 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:06:55 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[64544]: ts=2026-03-10T13:06:55.556Z caller=head.go:527 level=info component=tsdb msg="On-disk memory mappable chunks replay completed" duration=1.322µs 2026-03-10T13:06:55.773 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:06:55 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[64544]: ts=2026-03-10T13:06:55.556Z caller=head.go:533 level=info component=tsdb msg="Replaying WAL, this may take a while" 2026-03-10T13:06:55.773 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:06:55 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[64544]: ts=2026-03-10T13:06:55.556Z caller=head.go:604 level=info component=tsdb msg="WAL segment loaded" segment=0 maxSegment=0 2026-03-10T13:06:55.773 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:06:55 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[64544]: ts=2026-03-10T13:06:55.556Z caller=head.go:610 level=info component=tsdb msg="WAL replay completed" checkpoint_replay_duration=99.207µs wal_replay_duration=92.954µs total_replay_duration=204.925µs 2026-03-10T13:06:55.773 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:06:55 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[64544]: ts=2026-03-10T13:06:55.556Z caller=main.go:944 level=info fs_type=XFS_SUPER_MAGIC 2026-03-10T13:06:55.773 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:06:55 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[64544]: ts=2026-03-10T13:06:55.556Z caller=main.go:947 level=info msg="TSDB started" 2026-03-10T13:06:55.773 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:06:55 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[64544]: ts=2026-03-10T13:06:55.556Z caller=main.go:1128 level=info msg="Loading configuration file" filename=/etc/prometheus/prometheus.yml 2026-03-10T13:06:55.773 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:06:55 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[64544]: ts=2026-03-10T13:06:55.565Z caller=main.go:1165 level=info msg="Completed loading of configuration file" filename=/etc/prometheus/prometheus.yml totalDuration=8.978945ms db_storage=470ns remote_storage=1.312µs web_handler=170ns query_engine=281ns scrape=704.283µs scrape_sd=17.062µs notify=401ns notify_sd=822ns rules=8.029851ms 2026-03-10T13:06:55.773 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:06:55 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[64544]: ts=2026-03-10T13:06:55.565Z caller=main.go:896 level=info msg="Server is ready to receive web requests." 2026-03-10T13:06:55.835 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:55 vm00 ceph-mon[47364]: from='client.14556 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T13:06:55.835 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:55 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:06:55.835 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:55 vm00 ceph-mon[47364]: from='client.? 
192.168.123.100:0/3279473281' entity='client.admin' cmd=[{"prefix": "health", "format": "json"}]: dispatch 2026-03-10T13:06:55.835 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:55 vm00 ceph-mon[51670]: from='client.14556 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T13:06:55.835 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:55 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:06:55.835 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:55 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/3279473281' entity='client.admin' cmd=[{"prefix": "health", "format": "json"}]: dispatch 2026-03-10T13:06:56.165 INFO:teuthology.run_tasks:Running task cephadm.shell... 2026-03-10T13:06:56.167 INFO:tasks.cephadm:Running commands on role mon.a host ubuntu@vm00.local 2026-03-10T13:06:56.167 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'radosgw-admin realm create --rgw-realm=r --default' 2026-03-10T13:06:57.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:57 vm00 ceph-mon[47364]: Deploying daemon alertmanager.a on vm00 2026-03-10T13:06:57.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:57 vm00 ceph-mon[47364]: pgmap v9: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:06:57.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:57 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/3965954386' entity='client.admin' 2026-03-10T13:06:57.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:57 vm00 ceph-mon[51670]: Deploying daemon alertmanager.a on vm00 2026-03-10T13:06:57.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:57 vm00 ceph-mon[51670]: pgmap v9: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:06:57.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:57 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/3965954386' entity='client.admin' 2026-03-10T13:06:57.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:57 vm08 ceph-mon[49535]: Deploying daemon alertmanager.a on vm00 2026-03-10T13:06:57.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:57 vm08 ceph-mon[49535]: pgmap v9: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:06:57.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:57 vm08 ceph-mon[49535]: from='client.? 
192.168.123.100:0/3965954386' entity='client.admin' 2026-03-10T13:06:58.227 INFO:teuthology.orchestra.run.vm00.stdout:{ 2026-03-10T13:06:58.228 INFO:teuthology.orchestra.run.vm00.stdout: "id": "573898be-214e-4fc1-9815-829270b235ea", 2026-03-10T13:06:58.228 INFO:teuthology.orchestra.run.vm00.stdout: "name": "r", 2026-03-10T13:06:58.228 INFO:teuthology.orchestra.run.vm00.stdout: "current_period": "3e918afb-1ad5-4277-bb8c-02c12e64c0af", 2026-03-10T13:06:58.228 INFO:teuthology.orchestra.run.vm00.stdout: "epoch": 1 2026-03-10T13:06:58.228 INFO:teuthology.orchestra.run.vm00.stdout:} 2026-03-10T13:06:58.300 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'radosgw-admin zonegroup create --rgw-zonegroup=default --master --default' 2026-03-10T13:06:58.329 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:58 vm00 ceph-mon[47364]: osdmap e44: 8 total, 8 up, 8 in 2026-03-10T13:06:58.329 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:58 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/3524278420' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]: dispatch 2026-03-10T13:06:58.329 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:58 vm00 ceph-mon[47364]: from='client.? ' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]: dispatch 2026-03-10T13:06:58.329 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:58 vm00 ceph-mon[51670]: osdmap e44: 8 total, 8 up, 8 in 2026-03-10T13:06:58.329 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:58 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/3524278420' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]: dispatch 2026-03-10T13:06:58.329 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:58 vm00 ceph-mon[51670]: from='client.? ' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]: dispatch 2026-03-10T13:06:58.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:58 vm08 ceph-mon[49535]: osdmap e44: 8 total, 8 up, 8 in 2026-03-10T13:06:58.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:58 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/3524278420' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]: dispatch 2026-03-10T13:06:58.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:58 vm08 ceph-mon[49535]: from='client.? 
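At this point the cephadm.shell task has created realm "r" (epoch 1, with a fresh current_period) and moved on to the zonegroup. Every command in this block runs inside a container from the quay.io/ceph/ceph:v17.2.0 image via the test's copy of cephadm, not against host packages. A condensed sketch of that invocation pattern, with the image and fsid copied from the DEBUG lines above (the quoted command at the end is interchangeable):

    sudo cephadm --image quay.io/ceph/ceph:v17.2.0 shell \
        -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring \
        --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 \
        -- bash -c 'radosgw-admin realm create --rgw-realm=r --default'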
' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]: dispatch 2026-03-10T13:06:58.902 INFO:teuthology.orchestra.run.vm00.stdout:{ 2026-03-10T13:06:58.902 INFO:teuthology.orchestra.run.vm00.stdout: "id": "d235d5fe-f4b1-431c-b5d2-0751bc2163c2", 2026-03-10T13:06:58.902 INFO:teuthology.orchestra.run.vm00.stdout: "name": "default", 2026-03-10T13:06:58.903 INFO:teuthology.orchestra.run.vm00.stdout: "api_name": "default", 2026-03-10T13:06:58.903 INFO:teuthology.orchestra.run.vm00.stdout: "is_master": "true", 2026-03-10T13:06:58.903 INFO:teuthology.orchestra.run.vm00.stdout: "endpoints": [], 2026-03-10T13:06:58.903 INFO:teuthology.orchestra.run.vm00.stdout: "hostnames": [], 2026-03-10T13:06:58.903 INFO:teuthology.orchestra.run.vm00.stdout: "hostnames_s3website": [], 2026-03-10T13:06:58.903 INFO:teuthology.orchestra.run.vm00.stdout: "master_zone": "", 2026-03-10T13:06:58.903 INFO:teuthology.orchestra.run.vm00.stdout: "zones": [], 2026-03-10T13:06:58.903 INFO:teuthology.orchestra.run.vm00.stdout: "placement_targets": [], 2026-03-10T13:06:58.903 INFO:teuthology.orchestra.run.vm00.stdout: "default_placement": "", 2026-03-10T13:06:58.903 INFO:teuthology.orchestra.run.vm00.stdout: "realm_id": "573898be-214e-4fc1-9815-829270b235ea", 2026-03-10T13:06:58.903 INFO:teuthology.orchestra.run.vm00.stdout: "sync_policy": { 2026-03-10T13:06:58.903 INFO:teuthology.orchestra.run.vm00.stdout: "groups": [] 2026-03-10T13:06:58.903 INFO:teuthology.orchestra.run.vm00.stdout: } 2026-03-10T13:06:58.903 INFO:teuthology.orchestra.run.vm00.stdout:} 2026-03-10T13:06:58.940 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'radosgw-admin zone create --rgw-zonegroup=default --rgw-zone=z --master --default' 2026-03-10T13:06:59.197 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:59 vm00 ceph-mon[47364]: pgmap v11: 33 pgs: 32 unknown, 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:06:59.197 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:59 vm00 ceph-mon[47364]: from='client.? 
' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]': finished 2026-03-10T13:06:59.197 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:59 vm00 ceph-mon[47364]: osdmap e45: 8 total, 8 up, 8 in 2026-03-10T13:06:59.197 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:59 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:06:59.197 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:59 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:06:59.197 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:59 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:06:59.197 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:59 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:06:59.197 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:59 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch 2026-03-10T13:06:59.197 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:59 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:06:59.197 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:06:59 vm00 ceph-mon[47364]: osdmap e46: 8 total, 8 up, 8 in 2026-03-10T13:06:59.198 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:59 vm00 ceph-mon[51670]: pgmap v11: 33 pgs: 32 unknown, 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:06:59.198 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:59 vm00 ceph-mon[51670]: from='client.? ' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]': finished 2026-03-10T13:06:59.198 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:59 vm00 ceph-mon[51670]: osdmap e45: 8 total, 8 up, 8 in 2026-03-10T13:06:59.198 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:59 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:06:59.198 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:59 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:06:59.198 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:59 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:06:59.198 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:59 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:06:59.198 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:59 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch 2026-03-10T13:06:59.198 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:59 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:06:59.198 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:06:59 vm00 ceph-mon[51670]: osdmap e46: 8 total, 8 up, 8 in 2026-03-10T13:06:59.437 INFO:teuthology.orchestra.run.vm00.stdout:{ 2026-03-10T13:06:59.437 INFO:teuthology.orchestra.run.vm00.stdout: "id": "1f9bfe95-5789-43d1-ad17-fde3a53c812a", 2026-03-10T13:06:59.437 INFO:teuthology.orchestra.run.vm00.stdout: "name": "z", 2026-03-10T13:06:59.437 INFO:teuthology.orchestra.run.vm00.stdout: "domain_root": "z.rgw.meta:root", 2026-03-10T13:06:59.437 INFO:teuthology.orchestra.run.vm00.stdout: "control_pool": "z.rgw.control", 2026-03-10T13:06:59.437 INFO:teuthology.orchestra.run.vm00.stdout: "gc_pool": "z.rgw.log:gc", 2026-03-10T13:06:59.437 INFO:teuthology.orchestra.run.vm00.stdout: "lc_pool": 
"z.rgw.log:lc", 2026-03-10T13:06:59.437 INFO:teuthology.orchestra.run.vm00.stdout: "log_pool": "z.rgw.log", 2026-03-10T13:06:59.437 INFO:teuthology.orchestra.run.vm00.stdout: "intent_log_pool": "z.rgw.log:intent", 2026-03-10T13:06:59.437 INFO:teuthology.orchestra.run.vm00.stdout: "usage_log_pool": "z.rgw.log:usage", 2026-03-10T13:06:59.437 INFO:teuthology.orchestra.run.vm00.stdout: "roles_pool": "z.rgw.meta:roles", 2026-03-10T13:06:59.437 INFO:teuthology.orchestra.run.vm00.stdout: "reshard_pool": "z.rgw.log:reshard", 2026-03-10T13:06:59.437 INFO:teuthology.orchestra.run.vm00.stdout: "user_keys_pool": "z.rgw.meta:users.keys", 2026-03-10T13:06:59.437 INFO:teuthology.orchestra.run.vm00.stdout: "user_email_pool": "z.rgw.meta:users.email", 2026-03-10T13:06:59.437 INFO:teuthology.orchestra.run.vm00.stdout: "user_swift_pool": "z.rgw.meta:users.swift", 2026-03-10T13:06:59.437 INFO:teuthology.orchestra.run.vm00.stdout: "user_uid_pool": "z.rgw.meta:users.uid", 2026-03-10T13:06:59.437 INFO:teuthology.orchestra.run.vm00.stdout: "otp_pool": "z.rgw.otp", 2026-03-10T13:06:59.437 INFO:teuthology.orchestra.run.vm00.stdout: "system_key": { 2026-03-10T13:06:59.437 INFO:teuthology.orchestra.run.vm00.stdout: "access_key": "", 2026-03-10T13:06:59.437 INFO:teuthology.orchestra.run.vm00.stdout: "secret_key": "" 2026-03-10T13:06:59.437 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T13:06:59.437 INFO:teuthology.orchestra.run.vm00.stdout: "placement_pools": [ 2026-03-10T13:06:59.437 INFO:teuthology.orchestra.run.vm00.stdout: { 2026-03-10T13:06:59.437 INFO:teuthology.orchestra.run.vm00.stdout: "key": "default-placement", 2026-03-10T13:06:59.437 INFO:teuthology.orchestra.run.vm00.stdout: "val": { 2026-03-10T13:06:59.437 INFO:teuthology.orchestra.run.vm00.stdout: "index_pool": "z.rgw.buckets.index", 2026-03-10T13:06:59.437 INFO:teuthology.orchestra.run.vm00.stdout: "storage_classes": { 2026-03-10T13:06:59.437 INFO:teuthology.orchestra.run.vm00.stdout: "STANDARD": { 2026-03-10T13:06:59.437 INFO:teuthology.orchestra.run.vm00.stdout: "data_pool": "z.rgw.buckets.data" 2026-03-10T13:06:59.437 INFO:teuthology.orchestra.run.vm00.stdout: } 2026-03-10T13:06:59.437 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T13:06:59.437 INFO:teuthology.orchestra.run.vm00.stdout: "data_extra_pool": "z.rgw.buckets.non-ec", 2026-03-10T13:06:59.437 INFO:teuthology.orchestra.run.vm00.stdout: "index_type": 0 2026-03-10T13:06:59.437 INFO:teuthology.orchestra.run.vm00.stdout: } 2026-03-10T13:06:59.438 INFO:teuthology.orchestra.run.vm00.stdout: } 2026-03-10T13:06:59.438 INFO:teuthology.orchestra.run.vm00.stdout: ], 2026-03-10T13:06:59.438 INFO:teuthology.orchestra.run.vm00.stdout: "realm_id": "573898be-214e-4fc1-9815-829270b235ea", 2026-03-10T13:06:59.438 INFO:teuthology.orchestra.run.vm00.stdout: "notif_pool": "z.rgw.log:notif" 2026-03-10T13:06:59.438 INFO:teuthology.orchestra.run.vm00.stdout:} 2026-03-10T13:06:59.496 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'radosgw-admin period update --rgw-realm=r --commit' 2026-03-10T13:06:59.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:59 vm08 ceph-mon[49535]: pgmap v11: 33 pgs: 32 unknown, 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:06:59.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:59 vm08 
ceph-mon[49535]: from='client.? ' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]': finished 2026-03-10T13:06:59.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:59 vm08 ceph-mon[49535]: osdmap e45: 8 total, 8 up, 8 in 2026-03-10T13:06:59.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:59 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:06:59.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:59 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:06:59.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:59 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:06:59.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:59 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:06:59.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:59 vm08 ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch 2026-03-10T13:06:59.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:59 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:06:59.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:06:59 vm08 ceph-mon[49535]: osdmap e46: 8 total, 8 up, 8 in 2026-03-10T13:07:00.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:00 vm00 ceph-mon[47364]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch 2026-03-10T13:07:00.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:00 vm00 ceph-mon[47364]: Deploying daemon grafana.a on vm08 2026-03-10T13:07:00.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:00 vm00 ceph-mon[51670]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch 2026-03-10T13:07:00.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:00 vm00 ceph-mon[51670]: Deploying daemon grafana.a on vm08 2026-03-10T13:07:00.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:00 vm08 ceph-mon[49535]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch 2026-03-10T13:07:00.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:00 vm08 ceph-mon[49535]: Deploying daemon grafana.a on vm08 2026-03-10T13:07:01.253 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:07:00 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[69533]: level=info ts=2026-03-10T13:07:00.850Z caller=cluster.go:696 component=cluster msg="gossip not settled" polls=0 before=0 now=1 elapsed=2.000909734s 2026-03-10T13:07:01.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:01 vm00 ceph-mon[47364]: pgmap v14: 33 pgs: 32 unknown, 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:07:01.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:01 vm00 ceph-mon[47364]: osdmap e47: 8 total, 8 up, 8 in 2026-03-10T13:07:01.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:01 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/1547625110' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.log","app": "rgw"}]: dispatch 2026-03-10T13:07:01.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:01 vm00 ceph-mon[47364]: from='client.? 
' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.log","app": "rgw"}]: dispatch 2026-03-10T13:07:01.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:01 vm00 ceph-mon[51670]: pgmap v14: 33 pgs: 32 unknown, 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:07:01.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:01 vm00 ceph-mon[51670]: osdmap e47: 8 total, 8 up, 8 in 2026-03-10T13:07:01.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:01 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/1547625110' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.log","app": "rgw"}]: dispatch 2026-03-10T13:07:01.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:01 vm00 ceph-mon[51670]: from='client.? ' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.log","app": "rgw"}]: dispatch 2026-03-10T13:07:01.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:01 vm08 ceph-mon[49535]: pgmap v14: 33 pgs: 32 unknown, 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:07:01.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:01 vm08 ceph-mon[49535]: osdmap e47: 8 total, 8 up, 8 in 2026-03-10T13:07:01.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:01 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/1547625110' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.log","app": "rgw"}]: dispatch 2026-03-10T13:07:01.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:01 vm08 ceph-mon[49535]: from='client.? ' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.log","app": "rgw"}]: dispatch 2026-03-10T13:07:03.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:02 vm00 ceph-mon[47364]: from='client.? ' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": "z.rgw.log","app": "rgw"}]': finished 2026-03-10T13:07:03.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:02 vm00 ceph-mon[47364]: osdmap e48: 8 total, 8 up, 8 in 2026-03-10T13:07:03.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:02 vm00 ceph-mon[51670]: from='client.? ' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": "z.rgw.log","app": "rgw"}]': finished 2026-03-10T13:07:03.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:02 vm00 ceph-mon[51670]: osdmap e48: 8 total, 8 up, 8 in 2026-03-10T13:07:03.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:02 vm08 ceph-mon[49535]: from='client.? 
' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": "z.rgw.log","app": "rgw"}]': finished 2026-03-10T13:07:03.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:02 vm08 ceph-mon[49535]: osdmap e48: 8 total, 8 up, 8 in 2026-03-10T13:07:03.503 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:07:03 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:07:03] "GET /metrics HTTP/1.1" 200 192182 "" "Prometheus/2.33.4" 2026-03-10T13:07:04.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:03 vm00 ceph-mon[47364]: pgmap v17: 65 pgs: 32 unknown, 33 active+clean; 451 KiB data, 49 MiB used, 160 GiB / 160 GiB avail; 1.7 KiB/s rd, 4.0 KiB/s wr, 6 op/s 2026-03-10T13:07:04.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:03 vm00 ceph-mon[47364]: osdmap e49: 8 total, 8 up, 8 in 2026-03-10T13:07:04.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:03 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/1547625110' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.control","app": "rgw"}]: dispatch 2026-03-10T13:07:04.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:03 vm00 ceph-mon[47364]: from='client.? ' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.control","app": "rgw"}]: dispatch 2026-03-10T13:07:04.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:03 vm00 ceph-mon[51670]: pgmap v17: 65 pgs: 32 unknown, 33 active+clean; 451 KiB data, 49 MiB used, 160 GiB / 160 GiB avail; 1.7 KiB/s rd, 4.0 KiB/s wr, 6 op/s 2026-03-10T13:07:04.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:03 vm00 ceph-mon[51670]: osdmap e49: 8 total, 8 up, 8 in 2026-03-10T13:07:04.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:03 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/1547625110' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.control","app": "rgw"}]: dispatch 2026-03-10T13:07:04.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:03 vm00 ceph-mon[51670]: from='client.? ' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.control","app": "rgw"}]: dispatch 2026-03-10T13:07:04.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:03 vm08 ceph-mon[49535]: pgmap v17: 65 pgs: 32 unknown, 33 active+clean; 451 KiB data, 49 MiB used, 160 GiB / 160 GiB avail; 1.7 KiB/s rd, 4.0 KiB/s wr, 6 op/s 2026-03-10T13:07:04.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:03 vm08 ceph-mon[49535]: osdmap e49: 8 total, 8 up, 8 in 2026-03-10T13:07:04.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:03 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/1547625110' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.control","app": "rgw"}]: dispatch 2026-03-10T13:07:04.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:03 vm08 ceph-mon[49535]: from='client.? ' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.control","app": "rgw"}]: dispatch 2026-03-10T13:07:05.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:04 vm00 ceph-mon[47364]: pgmap v19: 97 pgs: 64 unknown, 33 active+clean; 451 KiB data, 49 MiB used, 160 GiB / 160 GiB avail; 1.6 KiB/s rd, 3.6 KiB/s wr, 6 op/s 2026-03-10T13:07:05.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:04 vm00 ceph-mon[47364]: from='client.? 
' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": "z.rgw.control","app": "rgw"}]': finished 2026-03-10T13:07:05.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:04 vm00 ceph-mon[47364]: osdmap e50: 8 total, 8 up, 8 in 2026-03-10T13:07:05.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:04 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:05.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:04 vm00 ceph-mon[51670]: pgmap v19: 97 pgs: 64 unknown, 33 active+clean; 451 KiB data, 49 MiB used, 160 GiB / 160 GiB avail; 1.6 KiB/s rd, 3.6 KiB/s wr, 6 op/s 2026-03-10T13:07:05.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:04 vm00 ceph-mon[51670]: from='client.? ' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": "z.rgw.control","app": "rgw"}]': finished 2026-03-10T13:07:05.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:04 vm00 ceph-mon[51670]: osdmap e50: 8 total, 8 up, 8 in 2026-03-10T13:07:05.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:04 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:05.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:04 vm08 ceph-mon[49535]: pgmap v19: 97 pgs: 64 unknown, 33 active+clean; 451 KiB data, 49 MiB used, 160 GiB / 160 GiB avail; 1.6 KiB/s rd, 3.6 KiB/s wr, 6 op/s 2026-03-10T13:07:05.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:04 vm08 ceph-mon[49535]: from='client.? ' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": "z.rgw.control","app": "rgw"}]': finished 2026-03-10T13:07:05.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:04 vm08 ceph-mon[49535]: osdmap e50: 8 total, 8 up, 8 in 2026-03-10T13:07:05.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:04 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:06.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:05 vm00 ceph-mon[47364]: osdmap e51: 8 total, 8 up, 8 in 2026-03-10T13:07:06.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:05 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/1164222898' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.meta","app": "rgw"}]: dispatch 2026-03-10T13:07:06.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:05 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/1164222898' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": "z.rgw.meta","app": "rgw"}]': finished 2026-03-10T13:07:06.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:05 vm00 ceph-mon[47364]: osdmap e52: 8 total, 8 up, 8 in 2026-03-10T13:07:06.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:05 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/1164222898' entity='client.admin' cmd=[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]: dispatch 2026-03-10T13:07:06.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:05 vm00 ceph-mon[51670]: osdmap e51: 8 total, 8 up, 8 in 2026-03-10T13:07:06.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:05 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/1164222898' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.meta","app": "rgw"}]: dispatch 2026-03-10T13:07:06.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:05 vm00 ceph-mon[51670]: from='client.? 
192.168.123.100:0/1164222898' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": "z.rgw.meta","app": "rgw"}]': finished 2026-03-10T13:07:06.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:05 vm00 ceph-mon[51670]: osdmap e52: 8 total, 8 up, 8 in 2026-03-10T13:07:06.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:05 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/1164222898' entity='client.admin' cmd=[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]: dispatch 2026-03-10T13:07:06.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:05 vm08 ceph-mon[49535]: osdmap e51: 8 total, 8 up, 8 in 2026-03-10T13:07:06.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:05 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/1164222898' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.meta","app": "rgw"}]: dispatch 2026-03-10T13:07:06.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:05 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/1164222898' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": "z.rgw.meta","app": "rgw"}]': finished 2026-03-10T13:07:06.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:05 vm08 ceph-mon[49535]: osdmap e52: 8 total, 8 up, 8 in 2026-03-10T13:07:06.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:05 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/1164222898' entity='client.admin' cmd=[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]: dispatch 2026-03-10T13:07:06.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:06 vm08 ceph-mon[49535]: pgmap v22: 129 pgs: 32 unknown, 97 active+clean; 451 KiB data, 51 MiB used, 160 GiB / 160 GiB avail; 5.7 KiB/s rd, 485 B/s wr, 6 op/s 2026-03-10T13:07:06.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:06 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/1164222898' entity='client.admin' cmd='[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]': finished 2026-03-10T13:07:06.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:06 vm08 ceph-mon[49535]: osdmap e53: 8 total, 8 up, 8 in 2026-03-10T13:07:06.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:06 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/1164222898' entity='client.admin' cmd=[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_num_min", "val": "8"}]: dispatch 2026-03-10T13:07:06.771 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:07:06 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[50837]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:07:06] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-10T13:07:07.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:06 vm00 ceph-mon[47364]: pgmap v22: 129 pgs: 32 unknown, 97 active+clean; 451 KiB data, 51 MiB used, 160 GiB / 160 GiB avail; 5.7 KiB/s rd, 485 B/s wr, 6 op/s 2026-03-10T13:07:07.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:06 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/1164222898' entity='client.admin' cmd='[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]': finished 2026-03-10T13:07:07.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:06 vm00 ceph-mon[47364]: osdmap e53: 8 total, 8 up, 8 in 2026-03-10T13:07:07.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:06 vm00 ceph-mon[47364]: from='client.? 
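The mon entries above show RGW tagging each pool it creates with the "rgw" application and giving the small z.rgw.meta pool autoscaler hints (pg_autoscale_bias=4, followed by pg_num_min=8). These are ordinary pool commands; run by hand they would look like the sketch below, which simply restates what the dispatched mon commands in this log already show:

    ceph osd pool application enable z.rgw.meta rgw
    ceph osd pool set z.rgw.meta pg_autoscale_bias 4
    ceph osd pool set z.rgw.meta pg_num_min 8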
192.168.123.100:0/1164222898' entity='client.admin' cmd=[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_num_min", "val": "8"}]: dispatch 2026-03-10T13:07:07.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:06 vm00 ceph-mon[51670]: pgmap v22: 129 pgs: 32 unknown, 97 active+clean; 451 KiB data, 51 MiB used, 160 GiB / 160 GiB avail; 5.7 KiB/s rd, 485 B/s wr, 6 op/s 2026-03-10T13:07:07.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:06 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/1164222898' entity='client.admin' cmd='[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]': finished 2026-03-10T13:07:07.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:06 vm00 ceph-mon[51670]: osdmap e53: 8 total, 8 up, 8 in 2026-03-10T13:07:07.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:06 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/1164222898' entity='client.admin' cmd=[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_num_min", "val": "8"}]: dispatch 2026-03-10T13:07:07.905 INFO:teuthology.orchestra.run.vm00.stdout:{ 2026-03-10T13:07:07.905 INFO:teuthology.orchestra.run.vm00.stdout: "id": "d27c89b4-5f23-4959-ab75-cd8bd71bbc3d", 2026-03-10T13:07:07.905 INFO:teuthology.orchestra.run.vm00.stdout: "epoch": 1, 2026-03-10T13:07:07.905 INFO:teuthology.orchestra.run.vm00.stdout: "predecessor_uuid": "3e918afb-1ad5-4277-bb8c-02c12e64c0af", 2026-03-10T13:07:07.905 INFO:teuthology.orchestra.run.vm00.stdout: "sync_status": [], 2026-03-10T13:07:07.905 INFO:teuthology.orchestra.run.vm00.stdout: "period_map": { 2026-03-10T13:07:07.905 INFO:teuthology.orchestra.run.vm00.stdout: "id": "d27c89b4-5f23-4959-ab75-cd8bd71bbc3d", 2026-03-10T13:07:07.905 INFO:teuthology.orchestra.run.vm00.stdout: "zonegroups": [ 2026-03-10T13:07:07.905 INFO:teuthology.orchestra.run.vm00.stdout: { 2026-03-10T13:07:07.905 INFO:teuthology.orchestra.run.vm00.stdout: "id": "d235d5fe-f4b1-431c-b5d2-0751bc2163c2", 2026-03-10T13:07:07.905 INFO:teuthology.orchestra.run.vm00.stdout: "name": "default", 2026-03-10T13:07:07.905 INFO:teuthology.orchestra.run.vm00.stdout: "api_name": "default", 2026-03-10T13:07:07.905 INFO:teuthology.orchestra.run.vm00.stdout: "is_master": "true", 2026-03-10T13:07:07.905 INFO:teuthology.orchestra.run.vm00.stdout: "endpoints": [], 2026-03-10T13:07:07.905 INFO:teuthology.orchestra.run.vm00.stdout: "hostnames": [], 2026-03-10T13:07:07.905 INFO:teuthology.orchestra.run.vm00.stdout: "hostnames_s3website": [], 2026-03-10T13:07:07.905 INFO:teuthology.orchestra.run.vm00.stdout: "master_zone": "1f9bfe95-5789-43d1-ad17-fde3a53c812a", 2026-03-10T13:07:07.905 INFO:teuthology.orchestra.run.vm00.stdout: "zones": [ 2026-03-10T13:07:07.905 INFO:teuthology.orchestra.run.vm00.stdout: { 2026-03-10T13:07:07.905 INFO:teuthology.orchestra.run.vm00.stdout: "id": "1f9bfe95-5789-43d1-ad17-fde3a53c812a", 2026-03-10T13:07:07.905 INFO:teuthology.orchestra.run.vm00.stdout: "name": "z", 2026-03-10T13:07:07.905 INFO:teuthology.orchestra.run.vm00.stdout: "endpoints": [], 2026-03-10T13:07:07.905 INFO:teuthology.orchestra.run.vm00.stdout: "log_meta": "false", 2026-03-10T13:07:07.905 INFO:teuthology.orchestra.run.vm00.stdout: "log_data": "false", 2026-03-10T13:07:07.905 INFO:teuthology.orchestra.run.vm00.stdout: "bucket_index_max_shards": 11, 2026-03-10T13:07:07.905 INFO:teuthology.orchestra.run.vm00.stdout: "read_only": "false", 2026-03-10T13:07:07.906 INFO:teuthology.orchestra.run.vm00.stdout: "tier_type": "", 2026-03-10T13:07:07.906 
INFO:teuthology.orchestra.run.vm00.stdout: "sync_from_all": "true", 2026-03-10T13:07:07.906 INFO:teuthology.orchestra.run.vm00.stdout: "sync_from": [], 2026-03-10T13:07:07.906 INFO:teuthology.orchestra.run.vm00.stdout: "redirect_zone": "" 2026-03-10T13:07:07.906 INFO:teuthology.orchestra.run.vm00.stdout: } 2026-03-10T13:07:07.906 INFO:teuthology.orchestra.run.vm00.stdout: ], 2026-03-10T13:07:07.906 INFO:teuthology.orchestra.run.vm00.stdout: "placement_targets": [ 2026-03-10T13:07:07.906 INFO:teuthology.orchestra.run.vm00.stdout: { 2026-03-10T13:07:07.906 INFO:teuthology.orchestra.run.vm00.stdout: "name": "default-placement", 2026-03-10T13:07:07.906 INFO:teuthology.orchestra.run.vm00.stdout: "tags": [], 2026-03-10T13:07:07.906 INFO:teuthology.orchestra.run.vm00.stdout: "storage_classes": [ 2026-03-10T13:07:07.906 INFO:teuthology.orchestra.run.vm00.stdout: "STANDARD" 2026-03-10T13:07:07.906 INFO:teuthology.orchestra.run.vm00.stdout: ] 2026-03-10T13:07:07.906 INFO:teuthology.orchestra.run.vm00.stdout: } 2026-03-10T13:07:07.906 INFO:teuthology.orchestra.run.vm00.stdout: ], 2026-03-10T13:07:07.906 INFO:teuthology.orchestra.run.vm00.stdout: "default_placement": "default-placement", 2026-03-10T13:07:07.906 INFO:teuthology.orchestra.run.vm00.stdout: "realm_id": "573898be-214e-4fc1-9815-829270b235ea", 2026-03-10T13:07:07.906 INFO:teuthology.orchestra.run.vm00.stdout: "sync_policy": { 2026-03-10T13:07:07.906 INFO:teuthology.orchestra.run.vm00.stdout: "groups": [] 2026-03-10T13:07:07.906 INFO:teuthology.orchestra.run.vm00.stdout: } 2026-03-10T13:07:07.906 INFO:teuthology.orchestra.run.vm00.stdout: } 2026-03-10T13:07:07.906 INFO:teuthology.orchestra.run.vm00.stdout: ], 2026-03-10T13:07:07.906 INFO:teuthology.orchestra.run.vm00.stdout: "short_zone_ids": [ 2026-03-10T13:07:07.906 INFO:teuthology.orchestra.run.vm00.stdout: { 2026-03-10T13:07:07.906 INFO:teuthology.orchestra.run.vm00.stdout: "key": "1f9bfe95-5789-43d1-ad17-fde3a53c812a", 2026-03-10T13:07:07.906 INFO:teuthology.orchestra.run.vm00.stdout: "val": 3248771502 2026-03-10T13:07:07.906 INFO:teuthology.orchestra.run.vm00.stdout: } 2026-03-10T13:07:07.906 INFO:teuthology.orchestra.run.vm00.stdout: ] 2026-03-10T13:07:07.906 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T13:07:07.906 INFO:teuthology.orchestra.run.vm00.stdout: "master_zonegroup": "d235d5fe-f4b1-431c-b5d2-0751bc2163c2", 2026-03-10T13:07:07.906 INFO:teuthology.orchestra.run.vm00.stdout: "master_zone": "1f9bfe95-5789-43d1-ad17-fde3a53c812a", 2026-03-10T13:07:07.906 INFO:teuthology.orchestra.run.vm00.stdout: "period_config": { 2026-03-10T13:07:07.906 INFO:teuthology.orchestra.run.vm00.stdout: "bucket_quota": { 2026-03-10T13:07:07.906 INFO:teuthology.orchestra.run.vm00.stdout: "enabled": false, 2026-03-10T13:07:07.906 INFO:teuthology.orchestra.run.vm00.stdout: "check_on_raw": false, 2026-03-10T13:07:07.906 INFO:teuthology.orchestra.run.vm00.stdout: "max_size": -1, 2026-03-10T13:07:07.906 INFO:teuthology.orchestra.run.vm00.stdout: "max_size_kb": 0, 2026-03-10T13:07:07.906 INFO:teuthology.orchestra.run.vm00.stdout: "max_objects": -1 2026-03-10T13:07:07.906 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T13:07:07.906 INFO:teuthology.orchestra.run.vm00.stdout: "user_quota": { 2026-03-10T13:07:07.906 INFO:teuthology.orchestra.run.vm00.stdout: "enabled": false, 2026-03-10T13:07:07.906 INFO:teuthology.orchestra.run.vm00.stdout: "check_on_raw": false, 2026-03-10T13:07:07.906 INFO:teuthology.orchestra.run.vm00.stdout: "max_size": -1, 2026-03-10T13:07:07.906 
INFO:teuthology.orchestra.run.vm00.stdout: "max_size_kb": 0, 2026-03-10T13:07:07.906 INFO:teuthology.orchestra.run.vm00.stdout: "max_objects": -1 2026-03-10T13:07:07.906 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T13:07:07.906 INFO:teuthology.orchestra.run.vm00.stdout: "user_ratelimit": { 2026-03-10T13:07:07.906 INFO:teuthology.orchestra.run.vm00.stdout: "max_read_ops": 0, 2026-03-10T13:07:07.906 INFO:teuthology.orchestra.run.vm00.stdout: "max_write_ops": 0, 2026-03-10T13:07:07.906 INFO:teuthology.orchestra.run.vm00.stdout: "max_read_bytes": 0, 2026-03-10T13:07:07.907 INFO:teuthology.orchestra.run.vm00.stdout: "max_write_bytes": 0, 2026-03-10T13:07:07.907 INFO:teuthology.orchestra.run.vm00.stdout: "enabled": false 2026-03-10T13:07:07.907 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T13:07:07.907 INFO:teuthology.orchestra.run.vm00.stdout: "bucket_ratelimit": { 2026-03-10T13:07:07.907 INFO:teuthology.orchestra.run.vm00.stdout: "max_read_ops": 0, 2026-03-10T13:07:07.907 INFO:teuthology.orchestra.run.vm00.stdout: "max_write_ops": 0, 2026-03-10T13:07:07.907 INFO:teuthology.orchestra.run.vm00.stdout: "max_read_bytes": 0, 2026-03-10T13:07:07.907 INFO:teuthology.orchestra.run.vm00.stdout: "max_write_bytes": 0, 2026-03-10T13:07:07.907 INFO:teuthology.orchestra.run.vm00.stdout: "enabled": false 2026-03-10T13:07:07.907 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T13:07:07.907 INFO:teuthology.orchestra.run.vm00.stdout: "anonymous_ratelimit": { 2026-03-10T13:07:07.907 INFO:teuthology.orchestra.run.vm00.stdout: "max_read_ops": 0, 2026-03-10T13:07:07.907 INFO:teuthology.orchestra.run.vm00.stdout: "max_write_ops": 0, 2026-03-10T13:07:07.907 INFO:teuthology.orchestra.run.vm00.stdout: "max_read_bytes": 0, 2026-03-10T13:07:07.907 INFO:teuthology.orchestra.run.vm00.stdout: "max_write_bytes": 0, 2026-03-10T13:07:07.907 INFO:teuthology.orchestra.run.vm00.stdout: "enabled": false 2026-03-10T13:07:07.907 INFO:teuthology.orchestra.run.vm00.stdout: } 2026-03-10T13:07:07.907 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T13:07:07.907 INFO:teuthology.orchestra.run.vm00.stdout: "realm_id": "573898be-214e-4fc1-9815-829270b235ea", 2026-03-10T13:07:07.907 INFO:teuthology.orchestra.run.vm00.stdout: "realm_name": "r", 2026-03-10T13:07:07.907 INFO:teuthology.orchestra.run.vm00.stdout: "realm_epoch": 2 2026-03-10T13:07:07.907 INFO:teuthology.orchestra.run.vm00.stdout:} 2026-03-10T13:07:07.964 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch apply rgw foo --realm r --zone z --placement=2 --port=8000' 2026-03-10T13:07:08.465 INFO:teuthology.orchestra.run.vm00.stdout:Scheduled rgw.foo update... 
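With the period committed (realm_epoch 2, master_zone now "z"), the orchestrator is asked to place the rgw.foo service, two daemons on port 8000 bound to realm r and zone z, and it replies "Scheduled rgw.foo update...". Taken together, the single-site RGW bootstrap this cephadm.shell block has performed so far is just these five commands, each visible verbatim in the DEBUG lines above:

    radosgw-admin realm create --rgw-realm=r --default
    radosgw-admin zonegroup create --rgw-zonegroup=default --master --default
    radosgw-admin zone create --rgw-zonegroup=default --rgw-zone=z --master --default
    radosgw-admin period update --rgw-realm=r --commit
    ceph orch apply rgw foo --realm r --zone z --placement=2 --port=8000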
2026-03-10T13:07:08.512 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph osd pool create foo' 2026-03-10T13:07:08.717 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:08 vm00 ceph-mon[47364]: pgmap v25: 129 pgs: 32 unknown, 97 active+clean; 451 KiB data, 51 MiB used, 160 GiB / 160 GiB avail; 6.0 KiB/s rd, 511 B/s wr, 7 op/s 2026-03-10T13:07:08.717 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:08 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/1164222898' entity='client.admin' cmd='[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_num_min", "val": "8"}]': finished 2026-03-10T13:07:08.717 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:08 vm00 ceph-mon[47364]: osdmap e54: 8 total, 8 up, 8 in 2026-03-10T13:07:08.717 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:08 vm00 ceph-mon[47364]: from='client.24464 -' entity='client.admin' cmd=[{"prefix": "orch apply rgw", "svc_id": "foo", "realm": "r", "zone": "z", "placement": "2", "port": 8000, "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:07:08.717 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:08 vm00 ceph-mon[47364]: Saving service rgw.foo spec with placement count:2 2026-03-10T13:07:08.717 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:08 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:08.717 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:08 vm00 ceph-mon[51670]: pgmap v25: 129 pgs: 32 unknown, 97 active+clean; 451 KiB data, 51 MiB used, 160 GiB / 160 GiB avail; 6.0 KiB/s rd, 511 B/s wr, 7 op/s 2026-03-10T13:07:08.717 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:08 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/1164222898' entity='client.admin' cmd='[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_num_min", "val": "8"}]': finished 2026-03-10T13:07:08.717 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:08 vm00 ceph-mon[51670]: osdmap e54: 8 total, 8 up, 8 in 2026-03-10T13:07:08.717 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:08 vm00 ceph-mon[51670]: from='client.24464 -' entity='client.admin' cmd=[{"prefix": "orch apply rgw", "svc_id": "foo", "realm": "r", "zone": "z", "placement": "2", "port": 8000, "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:07:08.717 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:08 vm00 ceph-mon[51670]: Saving service rgw.foo spec with placement count:2 2026-03-10T13:07:08.717 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:08 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:09.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:07:08 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[69533]: level=info ts=2026-03-10T13:07:08.853Z caller=cluster.go:688 component=cluster msg="gossip settled; proceeding" elapsed=10.00478963s 2026-03-10T13:07:09.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:08 vm08 ceph-mon[49535]: pgmap v25: 129 pgs: 32 unknown, 97 active+clean; 451 KiB data, 51 MiB used, 160 GiB / 160 GiB avail; 6.0 KiB/s rd, 511 B/s wr, 7 op/s 2026-03-10T13:07:09.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:08 vm08 ceph-mon[49535]: from='client.? 
192.168.123.100:0/1164222898' entity='client.admin' cmd='[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_num_min", "val": "8"}]': finished 2026-03-10T13:07:09.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:08 vm08 ceph-mon[49535]: osdmap e54: 8 total, 8 up, 8 in 2026-03-10T13:07:09.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:08 vm08 ceph-mon[49535]: from='client.24464 -' entity='client.admin' cmd=[{"prefix": "orch apply rgw", "svc_id": "foo", "realm": "r", "zone": "z", "placement": "2", "port": 8000, "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:07:09.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:08 vm08 ceph-mon[49535]: Saving service rgw.foo spec with placement count:2 2026-03-10T13:07:09.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:08 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:09.987 INFO:teuthology.orchestra.run.vm00.stderr:pool 'foo' created 2026-03-10T13:07:10.069 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'rbd pool init foo' 2026-03-10T13:07:10.235 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:09 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/2982873014' entity='client.admin' cmd=[{"prefix": "osd pool create", "pool": "foo"}]: dispatch 2026-03-10T13:07:10.235 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:09 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/2982873014' entity='client.admin' cmd=[{"prefix": "osd pool create", "pool": "foo"}]: dispatch 2026-03-10T13:07:10.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:09 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/2982873014' entity='client.admin' cmd=[{"prefix": "osd pool create", "pool": "foo"}]: dispatch 2026-03-10T13:07:11.254 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 systemd[1]: Starting Ceph grafana.a for 98a3dada-1c81-11f1-89c9-d57c120f78d5... 2026-03-10T13:07:11.254 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:11 vm08 ceph-mon[49535]: pgmap v27: 129 pgs: 32 unknown, 97 active+clean; 451 KiB data, 51 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:07:11.254 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:11 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/2982873014' entity='client.admin' cmd='[{"prefix": "osd pool create", "pool": "foo"}]': finished 2026-03-10T13:07:11.254 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:11 vm08 ceph-mon[49535]: osdmap e55: 8 total, 8 up, 8 in 2026-03-10T13:07:11.254 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:11 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/2328262168' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "foo","app": "rbd"}]: dispatch 2026-03-10T13:07:11.254 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:11 vm08 ceph-mon[49535]: from='client.? ' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "foo","app": "rbd"}]: dispatch 2026-03-10T13:07:11.357 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:11 vm00 ceph-mon[47364]: pgmap v27: 129 pgs: 32 unknown, 97 active+clean; 451 KiB data, 51 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:07:11.358 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:11 vm00 ceph-mon[47364]: from='client.? 
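Next the task prepares a plain replicated pool for RBD: the mons report pool 'foo' created, and "rbd pool init foo" follows, which, as the subsequent application-enable dispatches show, also tags the pool with the "rbd" application. The two commands as run inside the cephadm shell above:

    ceph osd pool create foo
    rbd pool init foo    # also enables the 'rbd' application on the pool, per the mon log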
192.168.123.100:0/2982873014' entity='client.admin' cmd='[{"prefix": "osd pool create", "pool": "foo"}]': finished 2026-03-10T13:07:11.358 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:11 vm00 ceph-mon[47364]: osdmap e55: 8 total, 8 up, 8 in 2026-03-10T13:07:11.358 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:11 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/2328262168' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "foo","app": "rbd"}]: dispatch 2026-03-10T13:07:11.358 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:11 vm00 ceph-mon[47364]: from='client.? ' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "foo","app": "rbd"}]: dispatch 2026-03-10T13:07:11.358 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:11 vm00 ceph-mon[51670]: pgmap v27: 129 pgs: 32 unknown, 97 active+clean; 451 KiB data, 51 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:07:11.358 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:11 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/2982873014' entity='client.admin' cmd='[{"prefix": "osd pool create", "pool": "foo"}]': finished 2026-03-10T13:07:11.358 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:11 vm00 ceph-mon[51670]: osdmap e55: 8 total, 8 up, 8 in 2026-03-10T13:07:11.358 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:11 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/2328262168' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "foo","app": "rbd"}]: dispatch 2026-03-10T13:07:11.358 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:11 vm00 ceph-mon[51670]: from='client.? ' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "foo","app": "rbd"}]: dispatch 2026-03-10T13:07:11.505 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 podman[64814]: 2026-03-10 13:07:11.252009922 +0000 UTC m=+0.015706099 container create cc6207fccfd03ccb7b907a7a7cd76e80298b7fa3edb400fca9f81ca579215a3c (image=quay.io/ceph/ceph-grafana:8.3.5, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a, release=236.1648460182, vcs-ref=3aadd00326f3dd6cfe65ee31017ab98915fddb56, name=ubi8, com.redhat.component=ubi8-container, description=Ceph Grafana Container, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, summary=Grafana Container configured for Ceph mgr/dashboard integration, io.openshift.expose-services=, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.5-236.1648460182, io.openshift.tags=base rhel8, maintainer=Paul Cuzner , version=8.5, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, vendor=Red Hat, Inc., architecture=x86_64, io.buildah.version=1.24.2, distribution-scope=public, build-date=2022-03-28T10:36:18.413762, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., vcs-type=git, io.k8s.display-name=Red Hat Universal Base Image 8) 2026-03-10T13:07:11.505 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 podman[64814]: 2026-03-10 13:07:11.285981759 +0000 UTC m=+0.049677926 container init cc6207fccfd03ccb7b907a7a7cd76e80298b7fa3edb400fca9f81ca579215a3c (image=quay.io/ceph/ceph-grafana:8.3.5, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a, maintainer=Paul Cuzner , description=Ceph Grafana Container, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.openshift.tags=base rhel8, io.k8s.display-name=Red Hat Universal Base Image 8, version=8.5, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, vendor=Red Hat, Inc., io.buildah.version=1.24.2, release=236.1648460182, architecture=x86_64, summary=Grafana Container configured for Ceph mgr/dashboard integration, build-date=2022-03-28T10:36:18.413762, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-ref=3aadd00326f3dd6cfe65ee31017ab98915fddb56, name=ubi8, com.redhat.component=ubi8-container, io.openshift.expose-services=, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.5-236.1648460182, vcs-type=git, distribution-scope=public) 2026-03-10T13:07:11.505 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 podman[64814]: 2026-03-10 13:07:11.290573599 +0000 UTC m=+0.054269776 container start cc6207fccfd03ccb7b907a7a7cd76e80298b7fa3edb400fca9f81ca579215a3c (image=quay.io/ceph/ceph-grafana:8.3.5, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a, summary=Grafana Container configured for Ceph mgr/dashboard integration, build-date=2022-03-28T10:36:18.413762, vendor=Red Hat, Inc., com.redhat.component=ubi8-container, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.5-236.1648460182, io.buildah.version=1.24.2, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., maintainer=Paul Cuzner , io.openshift.expose-services=, description=Ceph Grafana Container, vcs-type=git, io.openshift.tags=base rhel8, release=236.1648460182, vcs-ref=3aadd00326f3dd6cfe65ee31017ab98915fddb56, name=ubi8, architecture=x86_64, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, distribution-scope=public, version=8.5, io.k8s.display-name=Red Hat Universal Base Image 8, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com) 2026-03-10T13:07:11.505 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 bash[64814]: cc6207fccfd03ccb7b907a7a7cd76e80298b7fa3edb400fca9f81ca579215a3c 2026-03-10T13:07:11.506 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 podman[64814]: 2026-03-10 13:07:11.245899999 +0000 UTC m=+0.009596187 image pull dad864ee21e98e69f4029d1e417aa085001566be0d322fbc75bc6f29b0050c01 quay.io/ceph/ceph-grafana:8.3.5 2026-03-10T13:07:11.506 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 systemd[1]: Started Ceph grafana.a for 98a3dada-1c81-11f1-89c9-d57c120f78d5. 2026-03-10T13:07:11.506 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="The state of unified alerting is still not defined. The decision will be made during as we run the database migrations" logger=settings 2026-03-10T13:07:11.506 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=warn msg="falling back to legacy setting of 'min_interval_seconds'; please use the configuration option in the `unified_alerting` section if Grafana 8 alerts are enabled." 
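systemd has now started the grafana.a container on vm08 from quay.io/ceph/ceph-grafana:8.3.5, and Grafana begins loading its configuration and running its sqlite migrations (the long run of "Executing migration" lines that follows). A quick, illustrative way to confirm which monitoring daemons the orchestrator has placed at this point (not part of the run itself):

    sudo ceph orch ps | grep -E 'prometheus|grafana|alertmanager|node-exporter'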
logger=settings 2026-03-10T13:07:11.506 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Config loaded from" logger=settings file=/usr/share/grafana/conf/defaults.ini 2026-03-10T13:07:11.506 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Config loaded from" logger=settings file=/etc/grafana/grafana.ini 2026-03-10T13:07:11.506 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Config overridden from Environment variable" logger=settings var="GF_PATHS_DATA=/var/lib/grafana" 2026-03-10T13:07:11.506 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Config overridden from Environment variable" logger=settings var="GF_PATHS_LOGS=/var/log/grafana" 2026-03-10T13:07:11.506 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Config overridden from Environment variable" logger=settings var="GF_PATHS_PLUGINS=/var/lib/grafana/plugins" 2026-03-10T13:07:11.506 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Config overridden from Environment variable" logger=settings var="GF_PATHS_PROVISIONING=/etc/grafana/provisioning" 2026-03-10T13:07:11.506 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Path Home" logger=settings path=/usr/share/grafana 2026-03-10T13:07:11.506 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Path Data" logger=settings path=/var/lib/grafana 2026-03-10T13:07:11.506 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Path Logs" logger=settings path=/var/log/grafana 2026-03-10T13:07:11.506 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Path Plugins" logger=settings path=/var/lib/grafana/plugins 2026-03-10T13:07:11.506 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Path Provisioning" logger=settings path=/etc/grafana/provisioning 2026-03-10T13:07:11.506 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="App mode production" logger=settings 2026-03-10T13:07:11.506 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Connecting to DB" logger=sqlstore dbtype=sqlite3 2026-03-10T13:07:11.506 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: 
t=2026-03-10T13:07:11+0000 lvl=warn msg="SQLite database file has broader permissions than it should" logger=sqlstore path=/var/lib/grafana/grafana.db mode=-rw-r--r-- expected=-rw-r----- 2026-03-10T13:07:11.506 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Starting DB migrations" logger=migrator 2026-03-10T13:07:11.506 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create migration_log table" 2026-03-10T13:07:11.506 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create user table" 2026-03-10T13:07:11.506 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index user.login" 2026-03-10T13:07:11.506 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index user.email" 2026-03-10T13:07:11.506 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="drop index UQE_user_login - v1" 2026-03-10T13:07:11.506 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="drop index UQE_user_email - v1" 2026-03-10T13:07:11.506 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Rename table user to user_v1 - v1" 2026-03-10T13:07:11.506 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create user table v2" 2026-03-10T13:07:11.506 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_user_login - v2" 2026-03-10T13:07:11.506 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_user_email - v2" 2026-03-10T13:07:11.506 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="copy data_source v1 to v2" 2026-03-10T13:07:11.506 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Drop old table user_v1" 2026-03-10T13:07:11.506 
INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Add column help_flags1 to user table" 2026-03-10T13:07:11.506 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Update user table charset" 2026-03-10T13:07:11.506 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Add last_seen_at column to user" 2026-03-10T13:07:11.506 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Add missing user data" 2026-03-10T13:07:11.506 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Add is_disabled column to user" 2026-03-10T13:07:11.506 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Add index user.login/user.email" 2026-03-10T13:07:11.506 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Add is_service_account column to user" 2026-03-10T13:07:11.506 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create temp user table v1-7" 2026-03-10T13:07:11.507 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_temp_user_email - v1-7" 2026-03-10T13:07:11.507 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_temp_user_org_id - v1-7" 2026-03-10T13:07:11.507 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_temp_user_code - v1-7" 2026-03-10T13:07:11.507 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_temp_user_status - v1-7" 2026-03-10T13:07:11.507 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Update temp_user table charset" 2026-03-10T13:07:11.507 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 
ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="drop index IDX_temp_user_email - v1" 2026-03-10T13:07:11.507 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="drop index IDX_temp_user_org_id - v1" 2026-03-10T13:07:11.507 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="drop index IDX_temp_user_code - v1" 2026-03-10T13:07:11.507 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="drop index IDX_temp_user_status - v1" 2026-03-10T13:07:11.507 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Rename table temp_user to temp_user_tmp_qwerty - v1" 2026-03-10T13:07:11.507 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create temp_user v2" 2026-03-10T13:07:11.507 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_temp_user_email - v2" 2026-03-10T13:07:11.507 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_temp_user_org_id - v2" 2026-03-10T13:07:11.507 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_temp_user_code - v2" 2026-03-10T13:07:11.507 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_temp_user_status - v2" 2026-03-10T13:07:11.507 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="copy temp_user v1 to v2" 2026-03-10T13:07:11.507 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="drop temp_user_tmp_qwerty" 2026-03-10T13:07:11.507 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Set created for temp users that will otherwise prematurely expire" 2026-03-10T13:07:11.507 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: 
t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create star table" 2026-03-10T13:07:11.507 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index star.user_id_dashboard_id" 2026-03-10T13:07:11.507 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create org table v1" 2026-03-10T13:07:11.507 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_org_name - v1" 2026-03-10T13:07:11.507 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create org_user table v1" 2026-03-10T13:07:11.507 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_org_user_org_id - v1" 2026-03-10T13:07:11.507 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_org_user_org_id_user_id - v1" 2026-03-10T13:07:11.507 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_org_user_user_id - v1" 2026-03-10T13:07:11.507 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Update org table charset" 2026-03-10T13:07:11.507 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Update org_user table charset" 2026-03-10T13:07:11.507 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Migrate all Read Only Viewers to Viewers" 2026-03-10T13:07:11.507 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create dashboard table" 2026-03-10T13:07:11.507 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add index dashboard.account_id" 2026-03-10T13:07:11.507 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index dashboard_account_id_slug" 
2026-03-10T13:07:11.507 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create dashboard_tag table" 2026-03-10T13:07:11.507 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index dashboard_tag.dasboard_id_term" 2026-03-10T13:07:11.507 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="drop index UQE_dashboard_tag_dashboard_id_term - v1" 2026-03-10T13:07:11.507 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Rename table dashboard to dashboard_v1 - v1" 2026-03-10T13:07:11.507 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create dashboard v2" 2026-03-10T13:07:11.507 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_dashboard_org_id - v2" 2026-03-10T13:07:11.507 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_dashboard_org_id_slug - v2" 2026-03-10T13:07:11.507 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="copy dashboard v1 to v2" 2026-03-10T13:07:11.507 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="drop table dashboard_v1" 2026-03-10T13:07:11.507 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="alter dashboard.data to mediumtext v1" 2026-03-10T13:07:11.508 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Add column updated_by in dashboard - v2" 2026-03-10T13:07:11.508 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Add column created_by in dashboard - v2" 2026-03-10T13:07:11.508 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Add column gnetId in dashboard" 2026-03-10T13:07:11.508 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 
13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Add index for gnetId in dashboard" 2026-03-10T13:07:11.508 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Add column plugin_id in dashboard" 2026-03-10T13:07:11.508 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Add index for plugin_id in dashboard" 2026-03-10T13:07:11.508 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Add index for dashboard_id in dashboard_tag" 2026-03-10T13:07:11.508 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Update dashboard table charset" 2026-03-10T13:07:11.508 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Update dashboard_tag table charset" 2026-03-10T13:07:11.508 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Add column folder_id in dashboard" 2026-03-10T13:07:11.508 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Add column isFolder in dashboard" 2026-03-10T13:07:11.508 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Add column has_acl in dashboard" 2026-03-10T13:07:11.508 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Add column uid in dashboard" 2026-03-10T13:07:11.508 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Update uid column values in dashboard" 2026-03-10T13:07:11.508 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Add unique index dashboard_org_id_uid" 2026-03-10T13:07:11.508 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Remove unique index org_id_slug" 2026-03-10T13:07:11.508 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 
lvl=info msg="Executing migration" logger=migrator id="Update dashboard title length" 2026-03-10T13:07:11.508 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Add unique index for dashboard_org_id_title_folder_id" 2026-03-10T13:07:11.508 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create dashboard_provisioning" 2026-03-10T13:07:11.508 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Rename table dashboard_provisioning to dashboard_provisioning_tmp_qwerty - v1" 2026-03-10T13:07:11.508 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create dashboard_provisioning v2" 2026-03-10T13:07:11.508 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_dashboard_provisioning_dashboard_id - v2" 2026-03-10T13:07:11.508 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_dashboard_provisioning_dashboard_id_name - v2" 2026-03-10T13:07:11.508 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="copy dashboard_provisioning v1 to v2" 2026-03-10T13:07:11.508 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="drop dashboard_provisioning_tmp_qwerty" 2026-03-10T13:07:11.509 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Add check_sum column" 2026-03-10T13:07:11.509 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Add index for dashboard_title" 2026-03-10T13:07:11.509 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="delete tags for deleted dashboards" 2026-03-10T13:07:11.509 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="delete stars for deleted dashboards" 2026-03-10T13:07:11.509 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 
lvl=info msg="Executing migration" logger=migrator id="Add index for dashboard_is_folder" 2026-03-10T13:07:11.509 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create data_source table" 2026-03-10T13:07:11.509 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add index data_source.account_id" 2026-03-10T13:07:11.509 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index data_source.account_id_name" 2026-03-10T13:07:11.509 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="drop index IDX_data_source_account_id - v1" 2026-03-10T13:07:11.509 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="drop index UQE_data_source_account_id_name - v1" 2026-03-10T13:07:11.509 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Rename table data_source to data_source_v1 - v1" 2026-03-10T13:07:11.509 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create data_source table v2" 2026-03-10T13:07:11.509 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_data_source_org_id - v2" 2026-03-10T13:07:11.509 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_data_source_org_id_name - v2" 2026-03-10T13:07:11.509 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="copy data_source v1 to v2" 2026-03-10T13:07:11.509 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Drop old table data_source_v1 #2" 2026-03-10T13:07:11.509 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Add column with_credentials" 2026-03-10T13:07:11.509 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Add secure json 
data column" 2026-03-10T13:07:11.509 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Update data_source table charset" 2026-03-10T13:07:11.509 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Update initial version to 1" 2026-03-10T13:07:11.509 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Add read_only data column" 2026-03-10T13:07:11.509 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Migrate logging ds to loki ds" 2026-03-10T13:07:11.509 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Update json_data with nulls" 2026-03-10T13:07:11.509 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Add uid column" 2026-03-10T13:07:11.509 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Update uid value" 2026-03-10T13:07:11.509 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Add unique index datasource_org_id_uid" 2026-03-10T13:07:11.509 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index datasource_org_id_is_default" 2026-03-10T13:07:11.509 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create api_key table" 2026-03-10T13:07:11.509 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add index api_key.account_id" 2026-03-10T13:07:11.509 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add index api_key.key" 2026-03-10T13:07:11.509 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add index api_key.account_id_name" 2026-03-10T13:07:11.509 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: 
t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="drop index IDX_api_key_account_id - v1" 2026-03-10T13:07:11.509 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="drop index UQE_api_key_key - v1" 2026-03-10T13:07:11.509 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="drop index UQE_api_key_account_id_name - v1" 2026-03-10T13:07:11.509 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Rename table api_key to api_key_v1 - v1" 2026-03-10T13:07:11.509 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create api_key table v2" 2026-03-10T13:07:11.509 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_api_key_org_id - v2" 2026-03-10T13:07:11.509 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_api_key_key - v2" 2026-03-10T13:07:11.509 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_api_key_org_id_name - v2" 2026-03-10T13:07:11.509 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="copy api_key v1 to v2" 2026-03-10T13:07:11.509 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Drop old table api_key_v1" 2026-03-10T13:07:11.509 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Update api_key table charset" 2026-03-10T13:07:11.509 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Add expires to api_key table" 2026-03-10T13:07:11.509 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Add service account foreign key" 2026-03-10T13:07:11.509 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create dashboard_snapshot table v4" 
2026-03-10T13:07:11.509 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="drop table dashboard_snapshot_v4 #1" 2026-03-10T13:07:11.509 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create dashboard_snapshot table v5 #2" 2026-03-10T13:07:11.510 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_dashboard_snapshot_key - v5" 2026-03-10T13:07:11.510 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_dashboard_snapshot_delete_key - v5" 2026-03-10T13:07:11.510 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_dashboard_snapshot_user_id - v5" 2026-03-10T13:07:11.510 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="alter dashboard_snapshot to mediumtext v2" 2026-03-10T13:07:11.510 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Update dashboard_snapshot table charset" 2026-03-10T13:07:11.510 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Add column external_delete_url to dashboard_snapshots table" 2026-03-10T13:07:11.510 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Add encrypted dashboard json column" 2026-03-10T13:07:11.510 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Change dashboard_encrypted column to MEDIUMBLOB" 2026-03-10T13:07:11.510 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create quota table v1" 2026-03-10T13:07:11.510 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_quota_org_id_user_id_target - v1" 2026-03-10T13:07:11.510 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Update quota table charset" 
2026-03-10T13:07:11.510 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create plugin_setting table" 2026-03-10T13:07:11.510 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_plugin_setting_org_id_plugin_id - v1" 2026-03-10T13:07:11.510 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Add column plugin_version to plugin_settings" 2026-03-10T13:07:11.510 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Update plugin_setting table charset" 2026-03-10T13:07:11.510 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create session table" 2026-03-10T13:07:11.510 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Drop old table playlist table" 2026-03-10T13:07:11.510 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Drop old table playlist_item table" 2026-03-10T13:07:11.510 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create playlist table v2" 2026-03-10T13:07:11.510 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create playlist item table v2" 2026-03-10T13:07:11.510 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Update playlist table charset" 2026-03-10T13:07:11.510 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Update playlist_item table charset" 2026-03-10T13:07:11.510 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="drop preferences table v2" 2026-03-10T13:07:11.510 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="drop preferences table v3" 2026-03-10T13:07:11.510 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 
ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create preferences table v3" 2026-03-10T13:07:11.510 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Update preferences table charset" 2026-03-10T13:07:11.510 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Add column team_id in preferences" 2026-03-10T13:07:11.510 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Update team_id column values in preferences" 2026-03-10T13:07:11.510 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Add column week_start in preferences" 2026-03-10T13:07:11.510 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create alert table v1" 2026-03-10T13:07:11.510 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add index alert org_id & id " 2026-03-10T13:07:11.510 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add index alert state" 2026-03-10T13:07:11.510 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add index alert dashboard_id" 2026-03-10T13:07:11.510 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Create alert_rule_tag table v1" 2026-03-10T13:07:11.510 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Add unique index alert_rule_tag.alert_id_tag_id" 2026-03-10T13:07:11.510 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="drop index UQE_alert_rule_tag_alert_id_tag_id - v1" 2026-03-10T13:07:11.510 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Rename table alert_rule_tag to alert_rule_tag_v1 - v1" 2026-03-10T13:07:11.510 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 
lvl=info msg="Executing migration" logger=migrator id="Create alert_rule_tag table v2" 2026-03-10T13:07:11.510 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_alert_rule_tag_alert_id_tag_id - Add unique index alert_rule_tag.alert_id_tag_id V2" 2026-03-10T13:07:11.510 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="copy alert_rule_tag v1 to v2" 2026-03-10T13:07:11.510 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="drop table alert_rule_tag_v1" 2026-03-10T13:07:11.510 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create alert_notification table v1" 2026-03-10T13:07:11.510 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Add column is_default" 2026-03-10T13:07:11.510 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Add column frequency" 2026-03-10T13:07:11.510 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Add column send_reminder" 2026-03-10T13:07:11.510 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Add column disable_resolve_message" 2026-03-10T13:07:11.510 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add index alert_notification org_id & name" 2026-03-10T13:07:11.510 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Update alert table charset" 2026-03-10T13:07:11.510 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Update alert_notification table charset" 2026-03-10T13:07:11.510 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create notification_journal table v1" 2026-03-10T13:07:11.510 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add index 
notification_journal org_id & alert_id & notifier_id" 2026-03-10T13:07:11.511 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="drop alert_notification_journal" 2026-03-10T13:07:11.511 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create alert_notification_state table v1" 2026-03-10T13:07:11.511 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add index alert_notification_state org_id & alert_id & notifier_id" 2026-03-10T13:07:11.511 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Add for to alert table" 2026-03-10T13:07:11.511 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Add column uid in alert_notification" 2026-03-10T13:07:11.511 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Update uid column values in alert_notification" 2026-03-10T13:07:11.511 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Add unique index alert_notification_org_id_uid" 2026-03-10T13:07:11.511 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Remove unique index org_id_name" 2026-03-10T13:07:11.511 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Add column secure_settings in alert_notification" 2026-03-10T13:07:11.511 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="alter alert.settings to mediumtext" 2026-03-10T13:07:11.511 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Add non-unique index alert_notification_state_alert_id" 2026-03-10T13:07:11.511 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Add non-unique index alert_rule_tag_alert_id" 2026-03-10T13:07:11.511 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" 
logger=migrator id="Drop old annotation table v4" 2026-03-10T13:07:11.511 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create annotation table v5" 2026-03-10T13:07:11.511 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add index annotation 0 v3" 2026-03-10T13:07:11.511 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add index annotation 1 v3" 2026-03-10T13:07:11.511 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add index annotation 2 v3" 2026-03-10T13:07:11.511 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add index annotation 3 v3" 2026-03-10T13:07:11.511 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add index annotation 4 v3" 2026-03-10T13:07:11.511 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Update annotation table charset" 2026-03-10T13:07:11.511 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Add column region_id to annotation table" 2026-03-10T13:07:11.511 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Drop category_id index" 2026-03-10T13:07:11.511 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Add column tags to annotation table" 2026-03-10T13:07:11.511 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Create annotation_tag table v2" 2026-03-10T13:07:11.511 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Add unique index annotation_tag.annotation_id_tag_id" 2026-03-10T13:07:11.511 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="drop index UQE_annotation_tag_annotation_id_tag_id - v2" 2026-03-10T13:07:11.511 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 
10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Rename table annotation_tag to annotation_tag_v2 - v2" 2026-03-10T13:07:11.511 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Create annotation_tag table v3" 2026-03-10T13:07:11.512 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_annotation_tag_annotation_id_tag_id - Add unique index annotation_tag.annotation_id_tag_id V3" 2026-03-10T13:07:11.512 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="copy annotation_tag v2 to v3" 2026-03-10T13:07:11.512 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="drop table annotation_tag_v2" 2026-03-10T13:07:11.512 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Update alert annotations and set TEXT to empty" 2026-03-10T13:07:11.512 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Add created time to annotation table" 2026-03-10T13:07:11.512 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Add updated time to annotation table" 2026-03-10T13:07:11.512 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Add index for created in annotation table" 2026-03-10T13:07:11.512 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Add index for updated in annotation table" 2026-03-10T13:07:11.512 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Convert existing annotations from seconds to milliseconds" 2026-03-10T13:07:11.512 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Add epoch_end column" 2026-03-10T13:07:11.512 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Add index for epoch_end" 2026-03-10T13:07:11.512 
INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Make epoch_end the same as epoch" 2026-03-10T13:07:11.512 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Move region to single row" 2026-03-10T13:07:11.512 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Remove index org_id_epoch from annotation table" 2026-03-10T13:07:11.512 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Remove index org_id_dashboard_id_panel_id_epoch from annotation table" 2026-03-10T13:07:11.512 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Add index for org_id_dashboard_id_epoch_end_epoch on annotation table" 2026-03-10T13:07:11.512 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Add index for org_id_epoch_end_epoch on annotation table" 2026-03-10T13:07:11.512 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Remove index org_id_epoch_epoch_end from annotation table" 2026-03-10T13:07:11.512 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Add index for alert_id on annotation table" 2026-03-10T13:07:11.512 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create test_data table" 2026-03-10T13:07:11.512 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create dashboard_version table v1" 2026-03-10T13:07:11.512 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add index dashboard_version.dashboard_id" 2026-03-10T13:07:11.512 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index dashboard_version.dashboard_id and dashboard_version.version" 2026-03-10T13:07:11.512 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator 
id="Set dashboard version to 1 where 0" 2026-03-10T13:07:11.512 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="save existing dashboard data in dashboard_version table v1" 2026-03-10T13:07:11.512 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="alter dashboard_version.data to mediumtext v1" 2026-03-10T13:07:11.512 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create team table" 2026-03-10T13:07:11.512 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add index team.org_id" 2026-03-10T13:07:11.512 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index team_org_id_name" 2026-03-10T13:07:11.512 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create team member table" 2026-03-10T13:07:11.512 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add index team_member.org_id" 2026-03-10T13:07:11.512 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index team_member_org_id_team_id_user_id" 2026-03-10T13:07:11.512 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add index team_member.team_id" 2026-03-10T13:07:11.512 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Add column email to team table" 2026-03-10T13:07:11.512 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Add column external to team_member table" 2026-03-10T13:07:11.512 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Add column permission to team_member table" 2026-03-10T13:07:11.512 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create dashboard acl table" 2026-03-10T13:07:11.512 
INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add index dashboard_acl_dashboard_id" 2026-03-10T13:07:11.512 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index dashboard_acl_dashboard_id_user_id" 2026-03-10T13:07:11.512 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index dashboard_acl_dashboard_id_team_id" 2026-03-10T13:07:11.512 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add index dashboard_acl_user_id" 2026-03-10T13:07:11.512 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add index dashboard_acl_team_id" 2026-03-10T13:07:11.513 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add index dashboard_acl_org_id_role" 2026-03-10T13:07:11.513 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add index dashboard_permission" 2026-03-10T13:07:11.513 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="save default acl rules in dashboard_acl table" 2026-03-10T13:07:11.513 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="delete acl rules for deleted dashboards and folders" 2026-03-10T13:07:11.513 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create tag table" 2026-03-10T13:07:11.513 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add index tag.key_value" 2026-03-10T13:07:11.513 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create login attempt table" 2026-03-10T13:07:11.513 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add index login_attempt.username" 2026-03-10T13:07:11.513 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 
ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="drop index IDX_login_attempt_username - v1" 2026-03-10T13:07:11.513 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Rename table login_attempt to login_attempt_tmp_qwerty - v1" 2026-03-10T13:07:11.513 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create login_attempt v2" 2026-03-10T13:07:11.513 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_login_attempt_username - v2" 2026-03-10T13:07:11.513 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="copy login_attempt v1 to v2" 2026-03-10T13:07:11.513 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="drop login_attempt_tmp_qwerty" 2026-03-10T13:07:11.513 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create user auth table" 2026-03-10T13:07:11.513 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_user_auth_auth_module_auth_id - v1" 2026-03-10T13:07:11.513 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="alter user_auth.auth_id to length 190" 2026-03-10T13:07:11.513 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Add OAuth access token to user_auth" 2026-03-10T13:07:11.513 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Add OAuth refresh token to user_auth" 2026-03-10T13:07:11.513 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Add OAuth token type to user_auth" 2026-03-10T13:07:11.513 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Add OAuth expiry to user_auth" 2026-03-10T13:07:11.513 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: 
t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Add index to user_id column in user_auth" 2026-03-10T13:07:11.513 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create server_lock table" 2026-03-10T13:07:11.513 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add index server_lock.operation_uid" 2026-03-10T13:07:11.513 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create user auth token table" 2026-03-10T13:07:11.513 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index user_auth_token.auth_token" 2026-03-10T13:07:11.513 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index user_auth_token.prev_auth_token" 2026-03-10T13:07:11.513 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add index user_auth_token.user_id" 2026-03-10T13:07:11.513 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Add revoked_at to the user auth token" 2026-03-10T13:07:11.513 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create cache_data table" 2026-03-10T13:07:11.513 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index cache_data.cache_key" 2026-03-10T13:07:11.772 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create short_url table v1" 2026-03-10T13:07:11.773 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add index short_url.org_id-uid" 2026-03-10T13:07:11.773 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="delete alert_definition table" 2026-03-10T13:07:11.773 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="recreate 
alert_definition table" 2026-03-10T13:07:11.773 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_definition on org_id and title columns" 2026-03-10T13:07:11.773 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_definition on org_id and uid columns" 2026-03-10T13:07:11.773 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="alter alert_definition table data column to mediumtext in mysql" 2026-03-10T13:07:11.773 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="drop index in alert_definition on org_id and title columns" 2026-03-10T13:07:11.773 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="drop index in alert_definition on org_id and uid columns" 2026-03-10T13:07:11.773 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index in alert_definition on org_id and title columns" 2026-03-10T13:07:11.773 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index in alert_definition on org_id and uid columns" 2026-03-10T13:07:11.773 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Add column paused in alert_definition" 2026-03-10T13:07:11.773 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="drop alert_definition table" 2026-03-10T13:07:11.773 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="delete alert_definition_version table" 2026-03-10T13:07:11.773 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="recreate alert_definition_version table" 2026-03-10T13:07:11.773 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_definition_version table on alert_definition_id and version columns" 2026-03-10T13:07:11.773 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 
ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_definition_version table on alert_definition_uid and version columns" 2026-03-10T13:07:11.773 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="alter alert_definition_version table data column to mediumtext in mysql" 2026-03-10T13:07:11.773 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="drop alert_definition_version table" 2026-03-10T13:07:11.773 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create alert_instance table" 2026-03-10T13:07:11.773 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_instance table on def_org_id, def_uid and current_state columns" 2026-03-10T13:07:11.773 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_instance table on def_org_id, current_state columns" 2026-03-10T13:07:11.773 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add column current_state_end to alert_instance" 2026-03-10T13:07:11.773 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="remove index def_org_id, def_uid, current_state on alert_instance" 2026-03-10T13:07:11.773 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="remove index def_org_id, current_state on alert_instance" 2026-03-10T13:07:11.773 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="rename def_org_id to rule_org_id in alert_instance" 2026-03-10T13:07:11.773 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="rename def_uid to rule_uid in alert_instance" 2026-03-10T13:07:11.773 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add index rule_org_id, rule_uid, current_state on alert_instance" 2026-03-10T13:07:11.773 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: 
t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add index rule_org_id, current_state on alert_instance" 2026-03-10T13:07:11.773 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create alert_rule table" 2026-03-10T13:07:11.773 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_rule on org_id and title columns" 2026-03-10T13:07:11.773 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_rule on org_id and uid columns" 2026-03-10T13:07:11.773 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_rule on org_id, namespace_uid, group_uid columns" 2026-03-10T13:07:11.773 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="alter alert_rule table data column to mediumtext in mysql" 2026-03-10T13:07:11.773 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add column for to alert_rule" 2026-03-10T13:07:11.773 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add column annotations to alert_rule" 2026-03-10T13:07:11.773 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add column labels to alert_rule" 2026-03-10T13:07:11.773 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="remove unique index from alert_rule on org_id, title columns" 2026-03-10T13:07:11.773 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_rule on org_id, namespase_uid and title columns" 2026-03-10T13:07:11.773 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add dashboard_uid column to alert_rule" 2026-03-10T13:07:11.773 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add panel_id column to alert_rule" 2026-03-10T13:07:11.773 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 
ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_rule on org_id, dashboard_uid and panel_id columns" 2026-03-10T13:07:11.773 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create alert_rule_version table" 2026-03-10T13:07:11.773 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_rule_version table on rule_org_id, rule_uid and version columns" 2026-03-10T13:07:11.773 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_rule_version table on rule_org_id, rule_namespace_uid and rule_group columns" 2026-03-10T13:07:11.773 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="alter alert_rule_version table data column to mediumtext in mysql" 2026-03-10T13:07:11.773 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add column for to alert_rule_version" 2026-03-10T13:07:11.773 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add column annotations to alert_rule_version" 2026-03-10T13:07:11.773 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add column labels to alert_rule_version" 2026-03-10T13:07:11.773 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id=create_alert_configuration_table 2026-03-10T13:07:11.774 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Add column default in alert_configuration" 2026-03-10T13:07:11.774 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="alert alert_configuration alertmanager_configuration column from TEXT to MEDIUMTEXT if mysql" 2026-03-10T13:07:11.774 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add column org_id in alert_configuration" 2026-03-10T13:07:11.774 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info 
msg="Executing migration" logger=migrator id="add index in alert_configuration table on org_id column" 2026-03-10T13:07:11.774 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id=create_ngalert_configuration_table 2026-03-10T13:07:11.774 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add index in ngalert_configuration on org_id column" 2026-03-10T13:07:11.774 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="clear migration entry \"remove unified alerting data\"" 2026-03-10T13:07:11.774 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="move dashboard alerts to unified alerting" 2026-03-10T13:07:11.774 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create library_element table v1" 2026-03-10T13:07:11.774 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add index library_element org_id-folder_id-name-kind" 2026-03-10T13:07:11.774 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create library_element_connection table v1" 2026-03-10T13:07:11.774 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add index library_element_connection element_id-kind-connection_id" 2026-03-10T13:07:11.774 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index library_element org_id_uid" 2026-03-10T13:07:11.774 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="clone move dashboard alerts to unified alerting" 2026-03-10T13:07:11.774 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create data_keys table" 2026-03-10T13:07:11.774 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create kv_store table v1" 2026-03-10T13:07:11.774 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: 
t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add index kv_store.org_id-namespace-key" 2026-03-10T13:07:11.774 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="update dashboard_uid and panel_id from existing annotations" 2026-03-10T13:07:11.774 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create permission table" 2026-03-10T13:07:11.774 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index permission.role_id" 2026-03-10T13:07:11.774 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index role_id_action_scope" 2026-03-10T13:07:11.774 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create role table" 2026-03-10T13:07:11.774 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add column display_name" 2026-03-10T13:07:11.774 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add column group_name" 2026-03-10T13:07:11.774 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add index role.org_id" 2026-03-10T13:07:11.774 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index role_org_id_name" 2026-03-10T13:07:11.774 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add index role_org_id_uid" 2026-03-10T13:07:11.774 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create team role table" 2026-03-10T13:07:11.774 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add index team_role.org_id" 2026-03-10T13:07:11.774 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index team_role_org_id_team_id_role_id" 2026-03-10T13:07:11.774 
INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add index team_role.team_id" 2026-03-10T13:07:11.774 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create user role table" 2026-03-10T13:07:11.774 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add index user_role.org_id" 2026-03-10T13:07:11.774 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index user_role_org_id_user_id_role_id" 2026-03-10T13:07:11.774 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add index user_role.user_id" 2026-03-10T13:07:11.774 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create builtin role table" 2026-03-10T13:07:11.774 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add index builtin_role.role_id" 2026-03-10T13:07:11.774 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add index builtin_role.name" 2026-03-10T13:07:11.774 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Add column org_id to builtin_role table" 2026-03-10T13:07:11.774 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add index builtin_role.org_id" 2026-03-10T13:07:11.774 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index builtin_role_org_id_role_id_role" 2026-03-10T13:07:11.774 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="Remove unique index role_org_id_uid" 2026-03-10T13:07:11.774 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index role.uid" 2026-03-10T13:07:11.774 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: 
t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="create seed assignment table" 2026-03-10T13:07:11.774 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index builtin_role_role_name" 2026-03-10T13:07:11.774 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="migrations completed" logger=migrator performed=381 skipped=0 duration=270.694436ms 2026-03-10T13:07:11.774 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Created default organization" logger=sqlstore 2026-03-10T13:07:11.774 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Initialising plugins" logger=plugin.manager 2026-03-10T13:07:11.774 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Plugin registered" logger=plugin.manager pluginId=input 2026-03-10T13:07:11.774 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Plugin registered" logger=plugin.manager pluginId=vonage-status-panel 2026-03-10T13:07:11.774 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Plugin registered" logger=plugin.manager pluginId=grafana-piechart-panel 2026-03-10T13:07:11.774 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="Live Push Gateway initialization" logger=live.push_http 2026-03-10T13:07:11.774 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=warn msg="[Deprecated] the datasource provisioning config is outdated. 
please upgrade" logger=provisioning.datasources filename=/etc/grafana/provisioning/datasources/ceph-dashboard.yml 2026-03-10T13:07:11.774 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="inserting datasource from configuration " logger=provisioning.datasources name=Dashboard1 uid=P43CA22E17D0F9596 2026-03-10T13:07:11.774 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="HTTP Server Listen" logger=http.server address=[::]:3000 protocol=https subUrl= socket= 2026-03-10T13:07:11.774 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="warming cache for startup" logger=ngalert 2026-03-10T13:07:11.774 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:07:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:07:11+0000 lvl=info msg="starting MultiOrg Alertmanager" logger=ngalert.multiorg.alertmanager 2026-03-10T13:07:12.138 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:12 vm08 ceph-mon[49535]: from='client.? ' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": "foo","app": "rbd"}]': finished 2026-03-10T13:07:12.138 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:12 vm08 ceph-mon[49535]: osdmap e56: 8 total, 8 up, 8 in 2026-03-10T13:07:12.138 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:12 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:12.138 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:12 vm08 ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:07:12.138 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:12 vm08 ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:07:12.138 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:12 vm08 ceph-mon[49535]: osdmap e57: 8 total, 8 up, 8 in 2026-03-10T13:07:12.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:12 vm00 ceph-mon[47364]: from='client.? ' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": "foo","app": "rbd"}]': finished 2026-03-10T13:07:12.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:12 vm00 ceph-mon[47364]: osdmap e56: 8 total, 8 up, 8 in 2026-03-10T13:07:12.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:12 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:12.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:12 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:07:12.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:12 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:07:12.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:12 vm00 ceph-mon[47364]: osdmap e57: 8 total, 8 up, 8 in 2026-03-10T13:07:12.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:12 vm00 ceph-mon[51670]: from='client.? 
' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": "foo","app": "rbd"}]': finished 2026-03-10T13:07:12.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:12 vm00 ceph-mon[51670]: osdmap e56: 8 total, 8 up, 8 in 2026-03-10T13:07:12.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:12 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:12.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:12 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:07:12.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:12 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:07:12.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:12 vm00 ceph-mon[51670]: osdmap e57: 8 total, 8 up, 8 in 2026-03-10T13:07:13.062 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch apply iscsi foo u p' 2026-03-10T13:07:13.342 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:07:13 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:07:13] "GET /metrics HTTP/1.1" 200 192182 "" "Prometheus/2.33.4" 2026-03-10T13:07:13.595 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:13 vm00 ceph-mon[47364]: pgmap v30: 161 pgs: 32 unknown, 129 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 1.4 KiB/s rd, 1.4 KiB/s wr, 3 op/s 2026-03-10T13:07:13.595 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:13 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:13.595 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:13 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:13.595 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:13 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:13.595 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:13 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:13.595 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:13 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:13.595 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:13 vm00 ceph-mon[47364]: Saving service rgw.foo spec with placement count:2 2026-03-10T13:07:13.595 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:13 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:13.595 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:13 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm00.tvlvzo", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T13:07:13.595 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:13 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm00.tvlvzo", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T13:07:13.595 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:13 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": 
"client.rgw.foo.vm00.tvlvzo", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-03-10T13:07:13.595 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:13 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:13.595 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:13 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:07:13.595 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:13 vm00 ceph-mon[47364]: osdmap e58: 8 total, 8 up, 8 in 2026-03-10T13:07:13.595 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:13 vm00 ceph-mon[51670]: pgmap v30: 161 pgs: 32 unknown, 129 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 1.4 KiB/s rd, 1.4 KiB/s wr, 3 op/s 2026-03-10T13:07:13.595 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:13 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:13.595 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:13 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:13.595 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:13 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:13.595 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:13 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:13.595 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:13 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:13.595 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:13 vm00 ceph-mon[51670]: Saving service rgw.foo spec with placement count:2 2026-03-10T13:07:13.595 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:13 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:13.595 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:13 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm00.tvlvzo", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T13:07:13.595 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:13 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm00.tvlvzo", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T13:07:13.595 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:13 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm00.tvlvzo", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-03-10T13:07:13.595 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:13 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:13.595 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:13 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:07:13.595 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:13 vm00 ceph-mon[51670]: osdmap e58: 8 total, 8 up, 8 in 2026-03-10T13:07:13.605 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:13 vm08 ceph-mon[49535]: pgmap v30: 161 pgs: 32 unknown, 129 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 1.4 KiB/s rd, 1.4 KiB/s wr, 3 op/s 2026-03-10T13:07:13.605 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:13 vm08 
ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:13.605 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:13 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:13.605 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:13 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:13.605 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:13 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:13.605 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:13 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:13.605 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:13 vm08 ceph-mon[49535]: Saving service rgw.foo spec with placement count:2 2026-03-10T13:07:13.605 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:13 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:13.605 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:13 vm08 ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm00.tvlvzo", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T13:07:13.605 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:13 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm00.tvlvzo", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T13:07:13.605 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:13 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm00.tvlvzo", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-03-10T13:07:13.605 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:13 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:13.605 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:13 vm08 ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:07:13.605 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:13 vm08 ceph-mon[49535]: osdmap e58: 8 total, 8 up, 8 in 2026-03-10T13:07:13.858 INFO:teuthology.orchestra.run.vm00.stdout:Scheduled iscsi.foo update... 
2026-03-10T13:07:13.922 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'sleep 180' 2026-03-10T13:07:14.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:14 vm00 ceph-mon[47364]: Deploying daemon rgw.foo.vm00.tvlvzo on vm00 2026-03-10T13:07:14.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:14 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:14.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:14 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm08.ljayps", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T13:07:14.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:14 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm08.ljayps", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T13:07:14.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:14 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm08.ljayps", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-03-10T13:07:14.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:14 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:14.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:14 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:07:14.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:14 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:14.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:14 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:14.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:14 vm00 ceph-mon[51670]: Deploying daemon rgw.foo.vm00.tvlvzo on vm00 2026-03-10T13:07:14.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:14 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:14.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:14 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm08.ljayps", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T13:07:14.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:14 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm08.ljayps", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T13:07:14.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:14 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm08.ljayps", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-03-10T13:07:14.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:14 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:14.753 
INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:14 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:07:14.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:14 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:14.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:14 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:14.772 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:14 vm08 ceph-mon[49535]: Deploying daemon rgw.foo.vm00.tvlvzo on vm00 2026-03-10T13:07:14.772 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:14 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:14.772 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:14 vm08 ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm08.ljayps", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T13:07:14.772 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:14 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm08.ljayps", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T13:07:14.772 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:14 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm08.ljayps", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-03-10T13:07:14.772 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:14 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:14.772 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:14 vm08 ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:07:14.772 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:14 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:14.772 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:14 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:15.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:15 vm08 ceph-mon[49535]: Deploying daemon rgw.foo.vm08.ljayps on vm08 2026-03-10T13:07:15.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:15 vm08 ceph-mon[49535]: pgmap v33: 161 pgs: 32 unknown, 129 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 1.7 KiB/s rd, 1.7 KiB/s wr, 4 op/s 2026-03-10T13:07:15.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:15 vm08 ceph-mon[49535]: from='client.24538 -' entity='client.admin' cmd=[{"prefix": "orch apply iscsi", "pool": "foo", "api_user": "u", "api_password": "p", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:07:15.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:15 vm08 ceph-mon[49535]: Saving service iscsi.foo spec with placement count:1 2026-03-10T13:07:15.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:15 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:15.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:15 vm08 ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:07:15.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:15 vm08 
ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:07:15.904 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:15 vm00 ceph-mon[47364]: Deploying daemon rgw.foo.vm08.ljayps on vm08 2026-03-10T13:07:15.904 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:15 vm00 ceph-mon[47364]: pgmap v33: 161 pgs: 32 unknown, 129 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 1.7 KiB/s rd, 1.7 KiB/s wr, 4 op/s 2026-03-10T13:07:15.904 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:15 vm00 ceph-mon[47364]: from='client.24538 -' entity='client.admin' cmd=[{"prefix": "orch apply iscsi", "pool": "foo", "api_user": "u", "api_password": "p", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:07:15.904 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:15 vm00 ceph-mon[47364]: Saving service iscsi.foo spec with placement count:1 2026-03-10T13:07:15.904 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:15 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:15.904 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:15 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:07:15.905 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:15 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:07:15.905 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:15 vm00 ceph-mon[51670]: Deploying daemon rgw.foo.vm08.ljayps on vm08 2026-03-10T13:07:15.905 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:15 vm00 ceph-mon[51670]: pgmap v33: 161 pgs: 32 unknown, 129 active+clean; 453 KiB data, 53 MiB used, 160 GiB / 160 GiB avail; 1.7 KiB/s rd, 1.7 KiB/s wr, 4 op/s 2026-03-10T13:07:15.905 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:15 vm00 ceph-mon[51670]: from='client.24538 -' entity='client.admin' cmd=[{"prefix": "orch apply iscsi", "pool": "foo", "api_user": "u", "api_password": "p", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:07:15.905 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:15 vm00 ceph-mon[51670]: Saving service iscsi.foo spec with placement count:1 2026-03-10T13:07:15.905 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:15 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:15.905 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:15 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:07:15.905 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:15 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:07:16.771 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:07:16 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[50837]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:07:16] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-10T13:07:17.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:17 vm00 ceph-mon[47364]: pgmap v34: 161 pgs: 161 active+clean; 456 KiB data, 55 MiB used, 160 GiB / 160 GiB avail; 24 KiB/s rd, 6.3 KiB/s wr, 74 op/s 2026-03-10T13:07:17.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:17 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:17.503 
INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:17 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:17.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:17 vm00 ceph-mon[47364]: Checking dashboard <-> RGW credentials 2026-03-10T13:07:17.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:17 vm00 ceph-mon[51670]: pgmap v34: 161 pgs: 161 active+clean; 456 KiB data, 55 MiB used, 160 GiB / 160 GiB avail; 24 KiB/s rd, 6.3 KiB/s wr, 74 op/s 2026-03-10T13:07:17.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:17 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:17.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:17 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:17.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:17 vm00 ceph-mon[51670]: Checking dashboard <-> RGW credentials 2026-03-10T13:07:17.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:17 vm08 ceph-mon[49535]: pgmap v34: 161 pgs: 161 active+clean; 456 KiB data, 55 MiB used, 160 GiB / 160 GiB avail; 24 KiB/s rd, 6.3 KiB/s wr, 74 op/s 2026-03-10T13:07:17.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:17 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:17.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:17 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:17.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:17 vm08 ceph-mon[49535]: Checking dashboard <-> RGW credentials 2026-03-10T13:07:18.863 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:18 vm00 ceph-mon[47364]: osdmap e59: 8 total, 8 up, 8 in 2026-03-10T13:07:18.863 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:18 vm00 ceph-mon[47364]: pgmap v36: 161 pgs: 161 active+clean; 456 KiB data, 55 MiB used, 160 GiB / 160 GiB avail; 23 KiB/s rd, 5.8 KiB/s wr, 69 op/s 2026-03-10T13:07:18.863 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:18 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:18.863 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:18 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:18.863 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:18 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:18.863 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:18 vm00 ceph-mon[47364]: Checking pool "foo" exists for service iscsi.foo 2026-03-10T13:07:18.864 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:18 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dezodo", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-10T13:07:18.864 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:18 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dezodo", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-10T13:07:18.864 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:18 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dezodo", "caps": ["mon", "profile rbd, allow command 
\"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]': finished 2026-03-10T13:07:18.864 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:18 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:07:18.864 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:18 vm00 ceph-mon[47364]: Deploying daemon iscsi.foo.vm00.dezodo on vm00 2026-03-10T13:07:18.864 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:18 vm00 ceph-mon[51670]: osdmap e59: 8 total, 8 up, 8 in 2026-03-10T13:07:18.864 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:18 vm00 ceph-mon[51670]: pgmap v36: 161 pgs: 161 active+clean; 456 KiB data, 55 MiB used, 160 GiB / 160 GiB avail; 23 KiB/s rd, 5.8 KiB/s wr, 69 op/s 2026-03-10T13:07:18.864 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:18 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:18.864 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:18 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:18.864 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:18 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:18.864 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:18 vm00 ceph-mon[51670]: Checking pool "foo" exists for service iscsi.foo 2026-03-10T13:07:18.864 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:18 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dezodo", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-10T13:07:18.864 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:18 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dezodo", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-10T13:07:18.864 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:18 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dezodo", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]': finished 2026-03-10T13:07:18.864 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:18 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:07:18.864 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:18 vm00 ceph-mon[51670]: Deploying daemon iscsi.foo.vm00.dezodo on vm00 2026-03-10T13:07:18.974 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:18 vm08 ceph-mon[49535]: osdmap e59: 8 total, 8 up, 8 in 2026-03-10T13:07:18.974 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:18 vm08 ceph-mon[49535]: pgmap v36: 161 pgs: 161 active+clean; 456 KiB data, 55 MiB used, 160 GiB / 160 GiB avail; 23 KiB/s rd, 5.8 KiB/s wr, 69 op/s 2026-03-10T13:07:18.974 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:18 vm08 ceph-mon[49535]: 
from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:18.974 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:18 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:18.974 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:18 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:18.974 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:18 vm08 ceph-mon[49535]: Checking pool "foo" exists for service iscsi.foo 2026-03-10T13:07:18.974 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:18 vm08 ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dezodo", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-10T13:07:18.974 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:18 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dezodo", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-10T13:07:18.974 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:18 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dezodo", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]': finished 2026-03-10T13:07:18.974 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:18 vm08 ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:07:18.974 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:18 vm08 ceph-mon[49535]: Deploying daemon iscsi.foo.vm00.dezodo on vm00 2026-03-10T13:07:19.874 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:19 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:19.874 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:19 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:19.874 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:19 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:07:19.874 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:19 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:07:19.874 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:19 vm00 ceph-mon[47364]: from='client.? 
192.168.123.100:0/3900353820' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-10T13:07:19.874 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:19 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:19.874 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:19 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:19.874 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:19 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:07:19.874 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:19 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:07:19.875 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:19 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/3900353820' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-10T13:07:19.973 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:19 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:19.973 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:19 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:19.973 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:19 vm08 ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:07:19.974 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:19 vm08 ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:07:19.974 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:19 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/3900353820' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-10T13:07:20.827 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:20 vm00 ceph-mon[47364]: pgmap v37: 161 pgs: 161 active+clean; 456 KiB data, 55 MiB used, 160 GiB / 160 GiB avail; 18 KiB/s rd, 4.6 KiB/s wr, 54 op/s 2026-03-10T13:07:20.827 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:20 vm00 ceph-mon[47364]: mgrmap e20: y(active, since 36s), standbys: x 2026-03-10T13:07:20.827 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:20 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/1098235097' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1813360869"}]: dispatch 2026-03-10T13:07:20.827 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:20 vm00 ceph-mon[47364]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1813360869"}]: dispatch 2026-03-10T13:07:20.828 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:20 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:20.828 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:20 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:20.828 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:20 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:20.828 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:20 vm00 ceph-mon[47364]: Reconfiguring alertmanager.a (dependencies changed)... 
2026-03-10T13:07:20.828 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:20 vm00 ceph-mon[47364]: Reconfiguring daemon alertmanager.a on vm00 2026-03-10T13:07:20.828 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:07:20 vm00 systemd[1]: Stopping Ceph alertmanager.a for 98a3dada-1c81-11f1-89c9-d57c120f78d5... 2026-03-10T13:07:20.828 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:07:20 vm00 bash[72958]: Error: no container with name or ID "ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager.a" found: no such container 2026-03-10T13:07:20.828 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:07:20 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[69533]: level=info ts=2026-03-10T13:07:20.811Z caller=main.go:557 msg="Received SIGTERM, exiting gracefully..." 2026-03-10T13:07:20.828 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:20 vm00 ceph-mon[51670]: pgmap v37: 161 pgs: 161 active+clean; 456 KiB data, 55 MiB used, 160 GiB / 160 GiB avail; 18 KiB/s rd, 4.6 KiB/s wr, 54 op/s 2026-03-10T13:07:20.828 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:20 vm00 ceph-mon[51670]: mgrmap e20: y(active, since 36s), standbys: x 2026-03-10T13:07:20.828 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:20 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/1098235097' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1813360869"}]: dispatch 2026-03-10T13:07:20.828 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:20 vm00 ceph-mon[51670]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1813360869"}]: dispatch 2026-03-10T13:07:20.828 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:20 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:20.828 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:20 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:20.828 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:20 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:20.828 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:20 vm00 ceph-mon[51670]: Reconfiguring alertmanager.a (dependencies changed)... 2026-03-10T13:07:20.828 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:20 vm00 ceph-mon[51670]: Reconfiguring daemon alertmanager.a on vm00 2026-03-10T13:07:21.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:20 vm08 ceph-mon[49535]: pgmap v37: 161 pgs: 161 active+clean; 456 KiB data, 55 MiB used, 160 GiB / 160 GiB avail; 18 KiB/s rd, 4.6 KiB/s wr, 54 op/s 2026-03-10T13:07:21.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:20 vm08 ceph-mon[49535]: mgrmap e20: y(active, since 36s), standbys: x 2026-03-10T13:07:21.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:20 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/1098235097' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1813360869"}]: dispatch 2026-03-10T13:07:21.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:20 vm08 ceph-mon[49535]: from='client.? 
' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1813360869"}]: dispatch 2026-03-10T13:07:21.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:20 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:21.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:20 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:21.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:20 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:21.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:20 vm08 ceph-mon[49535]: Reconfiguring alertmanager.a (dependencies changed)... 2026-03-10T13:07:21.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:20 vm08 ceph-mon[49535]: Reconfiguring daemon alertmanager.a on vm00 2026-03-10T13:07:21.098 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:07:20 vm00 podman[72965]: 2026-03-10 13:07:20.826255596 +0000 UTC m=+0.030482823 container died 50fb5c16714e3174b77b4199fd572c991478e524ebf19d8c900cfa92401db60e (image=quay.io/prometheus/alertmanager:v0.23.0, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a, maintainer=The Prometheus Authors ) 2026-03-10T13:07:21.098 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:07:20 vm00 podman[72965]: 2026-03-10 13:07:20.848024493 +0000 UTC m=+0.052251720 container remove 50fb5c16714e3174b77b4199fd572c991478e524ebf19d8c900cfa92401db60e (image=quay.io/prometheus/alertmanager:v0.23.0, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a, maintainer=The Prometheus Authors ) 2026-03-10T13:07:21.098 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:07:20 vm00 podman[72965]: 2026-03-10 13:07:20.849506582 +0000 UTC m=+0.053733809 volume remove 93fdf7242f4489295b0b32048eeaf99eafb327666b97c7e1e5fd392b9d19e105 2026-03-10T13:07:21.098 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:07:20 vm00 bash[72965]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a 2026-03-10T13:07:21.098 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:07:20 vm00 bash[73006]: Error: no container with name or ID "ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager.a" found: no such container 2026-03-10T13:07:21.098 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:07:20 vm00 systemd[1]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@alertmanager.a.service: Deactivated successfully. 2026-03-10T13:07:21.098 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:07:20 vm00 systemd[1]: Stopped Ceph alertmanager.a for 98a3dada-1c81-11f1-89c9-d57c120f78d5. 2026-03-10T13:07:21.098 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:07:20 vm00 systemd[1]: Starting Ceph alertmanager.a for 98a3dada-1c81-11f1-89c9-d57c120f78d5... 
2026-03-10T13:07:21.098 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:07:21 vm00 podman[73047]: 2026-03-10 13:07:21.014960807 +0000 UTC m=+0.019599301 volume create c202a28c8467f574183faa5822471a8c63f60aab7ef1afab68e9fd2b2d6e7ec0 2026-03-10T13:07:21.098 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:07:21 vm00 podman[73047]: 2026-03-10 13:07:21.018315315 +0000 UTC m=+0.022953809 container create da91a70a93ac7f97f86bee4205d1a93a3007359e4a56018a04f1f652c57338c0 (image=quay.io/prometheus/alertmanager:v0.23.0, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a, maintainer=The Prometheus Authors ) 2026-03-10T13:07:21.098 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:07:21 vm00 podman[73047]: 2026-03-10 13:07:21.048337454 +0000 UTC m=+0.052975959 container init da91a70a93ac7f97f86bee4205d1a93a3007359e4a56018a04f1f652c57338c0 (image=quay.io/prometheus/alertmanager:v0.23.0, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a, maintainer=The Prometheus Authors ) 2026-03-10T13:07:21.098 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:07:21 vm00 podman[73047]: 2026-03-10 13:07:21.051072691 +0000 UTC m=+0.055711185 container start da91a70a93ac7f97f86bee4205d1a93a3007359e4a56018a04f1f652c57338c0 (image=quay.io/prometheus/alertmanager:v0.23.0, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a, maintainer=The Prometheus Authors ) 2026-03-10T13:07:21.098 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:07:21 vm00 bash[73047]: da91a70a93ac7f97f86bee4205d1a93a3007359e4a56018a04f1f652c57338c0 2026-03-10T13:07:21.098 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:07:21 vm00 podman[73047]: 2026-03-10 13:07:21.007270561 +0000 UTC m=+0.011909055 image pull ba2b418f427c0636d654de8757e830c80168e76482bcc46bb2138e569d6c91d4 quay.io/prometheus/alertmanager:v0.23.0 2026-03-10T13:07:21.098 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:07:21 vm00 systemd[1]: Started Ceph alertmanager.a for 98a3dada-1c81-11f1-89c9-d57c120f78d5. 2026-03-10T13:07:21.098 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:07:21 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=info ts=2026-03-10T13:07:21.061Z caller=main.go:225 msg="Starting Alertmanager" version="(version=0.23.0, branch=HEAD, revision=61046b17771a57cfd4c4a51be370ab930a4d7d54)" 2026-03-10T13:07:21.098 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:07:21 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=info ts=2026-03-10T13:07:21.061Z caller=main.go:226 build_context="(go=go1.16.7, user=root@e21a959be8d2, date=20210825-10:48:55)" 2026-03-10T13:07:21.098 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:07:21 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=info ts=2026-03-10T13:07:21.062Z caller=cluster.go:184 component=cluster msg="setting advertise address explicitly" addr=192.168.123.100 port=9094 2026-03-10T13:07:21.098 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:07:21 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=info ts=2026-03-10T13:07:21.062Z caller=cluster.go:671 component=cluster msg="Waiting for gossip to settle..." 
interval=2s 2026-03-10T13:07:21.098 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:07:21 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=info ts=2026-03-10T13:07:21.096Z caller=coordinator.go:113 component=configuration msg="Loading configuration file" file=/etc/alertmanager/alertmanager.yml 2026-03-10T13:07:21.503 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:07:21 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=info ts=2026-03-10T13:07:21.097Z caller=coordinator.go:126 component=configuration msg="Completed loading of configuration file" file=/etc/alertmanager/alertmanager.yml 2026-03-10T13:07:21.503 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:07:21 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=info ts=2026-03-10T13:07:21.098Z caller=main.go:518 msg=Listening address=:9093 2026-03-10T13:07:21.503 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:07:21 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=info ts=2026-03-10T13:07:21.098Z caller=tls_config.go:191 msg="TLS is disabled." http2=false 2026-03-10T13:07:21.616 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:07:21 vm08 systemd[1]: Stopping Ceph prometheus.a for 98a3dada-1c81-11f1-89c9-d57c120f78d5... 2026-03-10T13:07:21.616 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:07:21 vm08 bash[66829]: Error: no container with name or ID "ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus.a" found: no such container 2026-03-10T13:07:21.616 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:07:21 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[64544]: ts=2026-03-10T13:07:21.550Z caller=main.go:775 level=warn msg="Received SIGTERM, exiting gracefully..." 2026-03-10T13:07:21.617 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:07:21 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[64544]: ts=2026-03-10T13:07:21.550Z caller=main.go:798 level=info msg="Stopping scrape discovery manager..." 2026-03-10T13:07:21.617 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:07:21 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[64544]: ts=2026-03-10T13:07:21.550Z caller=main.go:812 level=info msg="Stopping notify discovery manager..." 2026-03-10T13:07:21.617 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:07:21 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[64544]: ts=2026-03-10T13:07:21.550Z caller=main.go:834 level=info msg="Stopping scrape manager..." 2026-03-10T13:07:21.617 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:07:21 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[64544]: ts=2026-03-10T13:07:21.550Z caller=main.go:794 level=info msg="Scrape discovery manager stopped" 2026-03-10T13:07:21.617 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:07:21 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[64544]: ts=2026-03-10T13:07:21.550Z caller=main.go:808 level=info msg="Notify discovery manager stopped" 2026-03-10T13:07:21.617 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:07:21 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[64544]: ts=2026-03-10T13:07:21.551Z caller=manager.go:945 level=info component="rule manager" msg="Stopping rule manager..." 
2026-03-10T13:07:21.617 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:07:21 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[64544]: ts=2026-03-10T13:07:21.551Z caller=manager.go:955 level=info component="rule manager" msg="Rule manager stopped" 2026-03-10T13:07:21.617 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:07:21 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[64544]: ts=2026-03-10T13:07:21.551Z caller=main.go:828 level=info msg="Scrape manager stopped" 2026-03-10T13:07:21.617 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:07:21 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[64544]: ts=2026-03-10T13:07:21.552Z caller=notifier.go:600 level=info component=notifier msg="Stopping notification manager..." 2026-03-10T13:07:21.617 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:07:21 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[64544]: ts=2026-03-10T13:07:21.552Z caller=main.go:1054 level=info msg="Notifier manager stopped" 2026-03-10T13:07:21.617 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:07:21 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[64544]: ts=2026-03-10T13:07:21.552Z caller=main.go:1066 level=info msg="See you next time!" 2026-03-10T13:07:21.617 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:07:21 vm08 podman[66836]: 2026-03-10 13:07:21.562600803 +0000 UTC m=+0.026335576 container died e06f967e832be536370b603f85761784ec07d538abfefad4a6ab61aac018c85d (image=quay.io/prometheus/prometheus:v2.33.4, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-10T13:07:21.617 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:07:21 vm08 podman[66836]: 2026-03-10 13:07:21.579996898 +0000 UTC m=+0.043731661 container remove e06f967e832be536370b603f85761784ec07d538abfefad4a6ab61aac018c85d (image=quay.io/prometheus/prometheus:v2.33.4, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-10T13:07:21.617 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:07:21 vm08 bash[66836]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a 2026-03-10T13:07:21.617 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:07:21 vm08 bash[66854]: Error: no container with name or ID "ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus.a" found: no such container 2026-03-10T13:07:21.900 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:07:21 vm08 systemd[1]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@prometheus.a.service: Deactivated successfully. 2026-03-10T13:07:21.900 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:07:21 vm08 systemd[1]: Stopped Ceph prometheus.a for 98a3dada-1c81-11f1-89c9-d57c120f78d5. 2026-03-10T13:07:21.900 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:07:21 vm08 systemd[1]: Starting Ceph prometheus.a for 98a3dada-1c81-11f1-89c9-d57c120f78d5... 
2026-03-10T13:07:21.901 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:07:21 vm08 podman[66895]: 2026-03-10 13:07:21.781433779 +0000 UTC m=+0.066059607 container create 0a921c94fbaee48eb66569df197ffd4f8f996767222a7d3a6c95dda415ba2c8f (image=quay.io/prometheus/prometheus:v2.33.4, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-10T13:07:21.901 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:07:21 vm08 podman[66895]: 2026-03-10 13:07:21.727474432 +0000 UTC m=+0.012100260 image pull 514e6a882f6e74806a5856468489eeff8d7106095557578da96935e4d0ba4d9d quay.io/prometheus/prometheus:v2.33.4 2026-03-10T13:07:21.901 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:07:21 vm08 podman[66895]: 2026-03-10 13:07:21.899085121 +0000 UTC m=+0.183710939 container init 0a921c94fbaee48eb66569df197ffd4f8f996767222a7d3a6c95dda415ba2c8f (image=quay.io/prometheus/prometheus:v2.33.4, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-10T13:07:22.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:21 vm00 ceph-mon[47364]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1813360869"}]': finished 2026-03-10T13:07:22.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:21 vm00 ceph-mon[47364]: osdmap e60: 8 total, 8 up, 8 in 2026-03-10T13:07:22.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:21 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/4271043141' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1569387625"}]: dispatch 2026-03-10T13:07:22.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:21 vm00 ceph-mon[47364]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1569387625"}]: dispatch 2026-03-10T13:07:22.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:21 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:22.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:21 vm00 ceph-mon[47364]: Reconfiguring prometheus.a (dependencies changed)... 2026-03-10T13:07:22.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:21 vm00 ceph-mon[47364]: Reconfiguring daemon prometheus.a on vm08 2026-03-10T13:07:22.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:21 vm00 ceph-mon[51670]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1813360869"}]': finished 2026-03-10T13:07:22.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:21 vm00 ceph-mon[51670]: osdmap e60: 8 total, 8 up, 8 in 2026-03-10T13:07:22.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:21 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/4271043141' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1569387625"}]: dispatch 2026-03-10T13:07:22.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:21 vm00 ceph-mon[51670]: from='client.? 
' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1569387625"}]: dispatch 2026-03-10T13:07:22.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:21 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:22.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:21 vm00 ceph-mon[51670]: Reconfiguring prometheus.a (dependencies changed)... 2026-03-10T13:07:22.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:21 vm00 ceph-mon[51670]: Reconfiguring daemon prometheus.a on vm08 2026-03-10T13:07:22.272 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:21 vm08 ceph-mon[49535]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1813360869"}]': finished 2026-03-10T13:07:22.272 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:21 vm08 ceph-mon[49535]: osdmap e60: 8 total, 8 up, 8 in 2026-03-10T13:07:22.272 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:21 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/4271043141' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1569387625"}]: dispatch 2026-03-10T13:07:22.272 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:21 vm08 ceph-mon[49535]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1569387625"}]: dispatch 2026-03-10T13:07:22.272 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:21 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:22.272 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:21 vm08 ceph-mon[49535]: Reconfiguring prometheus.a (dependencies changed)... 2026-03-10T13:07:22.272 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:21 vm08 ceph-mon[49535]: Reconfiguring daemon prometheus.a on vm08 2026-03-10T13:07:22.272 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:07:21 vm08 podman[66895]: 2026-03-10 13:07:21.902992951 +0000 UTC m=+0.187618769 container start 0a921c94fbaee48eb66569df197ffd4f8f996767222a7d3a6c95dda415ba2c8f (image=quay.io/prometheus/prometheus:v2.33.4, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-10T13:07:22.272 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:07:21 vm08 bash[66895]: 0a921c94fbaee48eb66569df197ffd4f8f996767222a7d3a6c95dda415ba2c8f 2026-03-10T13:07:22.272 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:07:21 vm08 systemd[1]: Started Ceph prometheus.a for 98a3dada-1c81-11f1-89c9-d57c120f78d5. 
2026-03-10T13:07:22.272 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:07:21 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[66905]: ts=2026-03-10T13:07:21.941Z caller=main.go:475 level=info msg="No time or size retention was set so using the default time retention" duration=15d 2026-03-10T13:07:22.272 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:07:21 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[66905]: ts=2026-03-10T13:07:21.942Z caller=main.go:512 level=info msg="Starting Prometheus" version="(version=2.33.4, branch=HEAD, revision=83032011a5d3e6102624fe58241a374a7201fee8)" 2026-03-10T13:07:22.272 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:07:21 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[66905]: ts=2026-03-10T13:07:21.942Z caller=main.go:517 level=info build_context="(go=go1.17.7, user=root@d13bf69e7be8, date=20220222-16:51:28)" 2026-03-10T13:07:22.272 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:07:21 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[66905]: ts=2026-03-10T13:07:21.942Z caller=main.go:518 level=info host_details="(Linux 5.14.0-686.el9.x86_64 #1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026 x86_64 vm08 (none))" 2026-03-10T13:07:22.272 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:07:21 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[66905]: ts=2026-03-10T13:07:21.942Z caller=main.go:519 level=info fd_limits="(soft=1048576, hard=1048576)" 2026-03-10T13:07:22.272 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:07:21 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[66905]: ts=2026-03-10T13:07:21.942Z caller=main.go:520 level=info vm_limits="(soft=unlimited, hard=unlimited)" 2026-03-10T13:07:22.272 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:07:21 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[66905]: ts=2026-03-10T13:07:21.946Z caller=web.go:570 level=info component=web msg="Start listening for connections" address=:9095 2026-03-10T13:07:22.272 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:07:21 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[66905]: ts=2026-03-10T13:07:21.947Z caller=main.go:923 level=info msg="Starting TSDB ..." 2026-03-10T13:07:22.272 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:07:21 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[66905]: ts=2026-03-10T13:07:21.950Z caller=head.go:493 level=info component=tsdb msg="Replaying on-disk memory mappable chunks if any" 2026-03-10T13:07:22.272 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:07:21 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[66905]: ts=2026-03-10T13:07:21.950Z caller=head.go:527 level=info component=tsdb msg="On-disk memory mappable chunks replay completed" duration=2.444µs 2026-03-10T13:07:22.272 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:07:21 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[66905]: ts=2026-03-10T13:07:21.950Z caller=head.go:533 level=info component=tsdb msg="Replaying WAL, this may take a while" 2026-03-10T13:07:22.272 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:07:21 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[66905]: ts=2026-03-10T13:07:21.951Z caller=tls_config.go:195 level=info component=web msg="TLS is disabled." 
http2=false 2026-03-10T13:07:23.034 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:22 vm00 ceph-mon[47364]: pgmap v39: 161 pgs: 161 active+clean; 457 KiB data, 62 MiB used, 160 GiB / 160 GiB avail; 144 KiB/s rd, 4.9 KiB/s wr, 272 op/s 2026-03-10T13:07:23.034 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:22 vm00 ceph-mon[47364]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1569387625"}]': finished 2026-03-10T13:07:23.034 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:22 vm00 ceph-mon[47364]: osdmap e61: 8 total, 8 up, 8 in 2026-03-10T13:07:23.035 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:22 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:23.035 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:22 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-03-10T13:07:23.035 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:22 vm00 ceph-mon[47364]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-03-10T13:07:23.035 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:22 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://192.168.123.100:9093"}]: dispatch 2026-03-10T13:07:23.035 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:22 vm00 ceph-mon[47364]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://192.168.123.100:9093"}]: dispatch 2026-03-10T13:07:23.035 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:22 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:23.035 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:22 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-10T13:07:23.035 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:22 vm00 ceph-mon[47364]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-10T13:07:23.035 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:22 vm00 ceph-mon[47364]: Adding iSCSI gateway http://:@192.168.123.100:5000 to Dashboard 2026-03-10T13:07:23.035 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:22 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-10T13:07:23.035 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:22 vm00 ceph-mon[47364]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-10T13:07:23.035 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:22 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:23.035 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:22 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm00"}]: dispatch 2026-03-10T13:07:23.035 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:22 vm00 ceph-mon[47364]: from='mon.? -' entity='mon.' 
cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm00"}]: dispatch 2026-03-10T13:07:23.035 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:22 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:23.035 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:22 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-10T13:07:23.035 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:22 vm00 ceph-mon[47364]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-10T13:07:23.035 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:22 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://192.168.123.108:9095"}]: dispatch 2026-03-10T13:07:23.035 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:22 vm00 ceph-mon[47364]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://192.168.123.108:9095"}]: dispatch 2026-03-10T13:07:23.035 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:22 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:23.035 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:22 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-10T13:07:23.035 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:22 vm00 ceph-mon[47364]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-10T13:07:23.035 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:22 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://192.168.123.108:3000"}]: dispatch 2026-03-10T13:07:23.035 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:22 vm00 ceph-mon[47364]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://192.168.123.108:3000"}]: dispatch 2026-03-10T13:07:23.035 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:22 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:23.035 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:22 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:07:23.035 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:22 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:07:23.035 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:22 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/3351730502' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/3821537877"}]: dispatch 2026-03-10T13:07:23.035 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:22 vm00 ceph-mon[47364]: from='client.? 
' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/3821537877"}]: dispatch 2026-03-10T13:07:23.035 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:22 vm00 ceph-mon[51670]: pgmap v39: 161 pgs: 161 active+clean; 457 KiB data, 62 MiB used, 160 GiB / 160 GiB avail; 144 KiB/s rd, 4.9 KiB/s wr, 272 op/s 2026-03-10T13:07:23.035 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:22 vm00 ceph-mon[51670]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1569387625"}]': finished 2026-03-10T13:07:23.035 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:22 vm00 ceph-mon[51670]: osdmap e61: 8 total, 8 up, 8 in 2026-03-10T13:07:23.035 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:22 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:23.035 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:22 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-03-10T13:07:23.035 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:22 vm00 ceph-mon[51670]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-03-10T13:07:23.035 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:22 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://192.168.123.100:9093"}]: dispatch 2026-03-10T13:07:23.035 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:22 vm00 ceph-mon[51670]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://192.168.123.100:9093"}]: dispatch 2026-03-10T13:07:23.035 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:22 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:23.035 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:22 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-10T13:07:23.035 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:22 vm00 ceph-mon[51670]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-10T13:07:23.035 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:22 vm00 ceph-mon[51670]: Adding iSCSI gateway http://:@192.168.123.100:5000 to Dashboard 2026-03-10T13:07:23.035 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:22 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-10T13:07:23.035 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:22 vm00 ceph-mon[51670]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-10T13:07:23.035 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:22 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:23.035 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:22 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm00"}]: dispatch 2026-03-10T13:07:23.035 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:22 vm00 ceph-mon[51670]: from='mon.? -' entity='mon.' 
cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm00"}]: dispatch 2026-03-10T13:07:23.035 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:22 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:23.035 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:22 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-10T13:07:23.035 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:22 vm00 ceph-mon[51670]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-10T13:07:23.035 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:22 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://192.168.123.108:9095"}]: dispatch 2026-03-10T13:07:23.035 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:22 vm00 ceph-mon[51670]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://192.168.123.108:9095"}]: dispatch 2026-03-10T13:07:23.035 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:22 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:23.035 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:22 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-10T13:07:23.035 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:22 vm00 ceph-mon[51670]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-10T13:07:23.036 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:22 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://192.168.123.108:3000"}]: dispatch 2026-03-10T13:07:23.036 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:22 vm00 ceph-mon[51670]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://192.168.123.108:3000"}]: dispatch 2026-03-10T13:07:23.036 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:22 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:23.036 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:22 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:07:23.036 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:22 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:07:23.036 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:22 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/3351730502' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/3821537877"}]: dispatch 2026-03-10T13:07:23.036 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:22 vm00 ceph-mon[51670]: from='client.? 
' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/3821537877"}]: dispatch 2026-03-10T13:07:23.105 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:22 vm08 ceph-mon[49535]: pgmap v39: 161 pgs: 161 active+clean; 457 KiB data, 62 MiB used, 160 GiB / 160 GiB avail; 144 KiB/s rd, 4.9 KiB/s wr, 272 op/s 2026-03-10T13:07:23.105 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:22 vm08 ceph-mon[49535]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1569387625"}]': finished 2026-03-10T13:07:23.105 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:22 vm08 ceph-mon[49535]: osdmap e61: 8 total, 8 up, 8 in 2026-03-10T13:07:23.105 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:22 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:23.105 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:22 vm08 ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-03-10T13:07:23.105 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:22 vm08 ceph-mon[49535]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-03-10T13:07:23.105 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:22 vm08 ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://192.168.123.100:9093"}]: dispatch 2026-03-10T13:07:23.105 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:22 vm08 ceph-mon[49535]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://192.168.123.100:9093"}]: dispatch 2026-03-10T13:07:23.105 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:22 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:23.105 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:22 vm08 ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-10T13:07:23.105 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:22 vm08 ceph-mon[49535]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-10T13:07:23.105 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:22 vm08 ceph-mon[49535]: Adding iSCSI gateway http://:@192.168.123.100:5000 to Dashboard 2026-03-10T13:07:23.105 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:22 vm08 ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-10T13:07:23.105 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:22 vm08 ceph-mon[49535]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-10T13:07:23.105 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:22 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:23.105 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:22 vm08 ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm00"}]: dispatch 2026-03-10T13:07:23.105 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:22 vm08 ceph-mon[49535]: from='mon.? -' entity='mon.' 
cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm00"}]: dispatch 2026-03-10T13:07:23.105 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:22 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:23.105 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:22 vm08 ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-10T13:07:23.105 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:22 vm08 ceph-mon[49535]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-10T13:07:23.105 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:22 vm08 ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://192.168.123.108:9095"}]: dispatch 2026-03-10T13:07:23.105 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:22 vm08 ceph-mon[49535]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://192.168.123.108:9095"}]: dispatch 2026-03-10T13:07:23.106 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:22 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:23.106 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:22 vm08 ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-10T13:07:23.106 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:22 vm08 ceph-mon[49535]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-10T13:07:23.106 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:22 vm08 ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://192.168.123.108:3000"}]: dispatch 2026-03-10T13:07:23.106 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:22 vm08 ceph-mon[49535]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://192.168.123.108:3000"}]: dispatch 2026-03-10T13:07:23.106 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:22 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:23.106 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:22 vm08 ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:07:23.106 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:22 vm08 ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:07:23.106 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:22 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/3351730502' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/3821537877"}]: dispatch 2026-03-10T13:07:23.106 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:22 vm08 ceph-mon[49535]: from='client.? 
' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/3821537877"}]: dispatch
2026-03-10T13:07:23.503 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:07:23 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=info ts=2026-03-10T13:07:23.063Z caller=cluster.go:696 component=cluster msg="gossip not settled" polls=0 before=0 now=1 elapsed=2.000562652s
2026-03-10T13:07:23.521 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:07:23 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[66905]: ts=2026-03-10T13:07:23.323Z caller=head.go:604 level=info component=tsdb msg="WAL segment loaded" segment=0 maxSegment=1
2026-03-10T13:07:23.521 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:07:23 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[66905]: ts=2026-03-10T13:07:23.323Z caller=head.go:604 level=info component=tsdb msg="WAL segment loaded" segment=1 maxSegment=1
2026-03-10T13:07:23.521 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:07:23 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[66905]: ts=2026-03-10T13:07:23.323Z caller=head.go:610 level=info component=tsdb msg="WAL replay completed" checkpoint_replay_duration=268.544µs wal_replay_duration=1.372751892s total_replay_duration=1.373034441s
2026-03-10T13:07:23.521 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:07:23 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[66905]: ts=2026-03-10T13:07:23.324Z caller=main.go:944 level=info fs_type=XFS_SUPER_MAGIC
2026-03-10T13:07:23.521 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:07:23 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[66905]: ts=2026-03-10T13:07:23.324Z caller=main.go:947 level=info msg="TSDB started"
2026-03-10T13:07:23.521 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:07:23 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[66905]: ts=2026-03-10T13:07:23.324Z caller=main.go:1128 level=info msg="Loading configuration file" filename=/etc/prometheus/prometheus.yml
2026-03-10T13:07:23.521 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:07:23 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[66905]: ts=2026-03-10T13:07:23.335Z caller=main.go:1165 level=info msg="Completed loading of configuration file" filename=/etc/prometheus/prometheus.yml totalDuration=10.897614ms db_storage=531ns remote_storage=1.073µs web_handler=331ns query_engine=591ns scrape=654.247µs scrape_sd=26.991µs notify=25.137µs notify_sd=5.781µs rules=10.000792ms
2026-03-10T13:07:23.521 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:07:23 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[66905]: ts=2026-03-10T13:07:23.335Z caller=main.go:896 level=info msg="Server is ready to receive web requests."
2026-03-10T13:07:24.151 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:24 vm08 ceph-mon[49535]: from='client.? 
' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/3821537877"}]': finished 2026-03-10T13:07:24.151 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:24 vm08 ceph-mon[49535]: osdmap e62: 8 total, 8 up, 8 in 2026-03-10T13:07:24.151 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:24 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:24.151 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:24 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:24.151 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:24 vm08 ceph-mon[49535]: Checking dashboard <-> RGW credentials 2026-03-10T13:07:24.151 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:24 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/3182423445' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/2653325477"}]: dispatch 2026-03-10T13:07:24.151 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:24 vm08 ceph-mon[49535]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/2653325477"}]: dispatch 2026-03-10T13:07:24.151 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:24 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:24.151 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:24 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:24.151 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:24 vm08 ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:07:24.151 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:24 vm08 ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:07:24.152 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:24 vm00 ceph-mon[47364]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/3821537877"}]': finished 2026-03-10T13:07:24.152 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:24 vm00 ceph-mon[47364]: osdmap e62: 8 total, 8 up, 8 in 2026-03-10T13:07:24.152 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:24 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:24.152 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:24 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:24.152 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:24 vm00 ceph-mon[47364]: Checking dashboard <-> RGW credentials 2026-03-10T13:07:24.152 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:24 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/3182423445' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/2653325477"}]: dispatch 2026-03-10T13:07:24.152 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:24 vm00 ceph-mon[47364]: from='client.? 
' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/2653325477"}]: dispatch 2026-03-10T13:07:24.152 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:24 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:24.152 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:24 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:24.152 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:24 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:07:24.152 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:24 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:07:24.152 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:24 vm00 ceph-mon[51670]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/3821537877"}]': finished 2026-03-10T13:07:24.152 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:24 vm00 ceph-mon[51670]: osdmap e62: 8 total, 8 up, 8 in 2026-03-10T13:07:24.152 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:24 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:24.152 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:24 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:24.152 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:24 vm00 ceph-mon[51670]: Checking dashboard <-> RGW credentials 2026-03-10T13:07:24.152 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:24 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/3182423445' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/2653325477"}]: dispatch 2026-03-10T13:07:24.152 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:24 vm00 ceph-mon[51670]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/2653325477"}]: dispatch 2026-03-10T13:07:24.152 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:24 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:24.152 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:24 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:24.152 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:24 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:07:24.152 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:24 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:07:25.229 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:25 vm00 ceph-mon[47364]: pgmap v42: 161 pgs: 161 active+clean; 457 KiB data, 62 MiB used, 160 GiB / 160 GiB avail; 170 KiB/s rd, 682 B/s wr, 294 op/s 2026-03-10T13:07:25.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:25 vm00 ceph-mon[47364]: from='client.? 
' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/2653325477"}]': finished 2026-03-10T13:07:25.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:25 vm00 ceph-mon[47364]: osdmap e63: 8 total, 8 up, 8 in 2026-03-10T13:07:25.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:25 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/1693364658' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/2653325477"}]: dispatch 2026-03-10T13:07:25.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:25 vm00 ceph-mon[47364]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/2653325477"}]: dispatch 2026-03-10T13:07:25.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:25 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:25.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:25 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:25.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:25 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:25.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:25 vm00 ceph-mon[51670]: pgmap v42: 161 pgs: 161 active+clean; 457 KiB data, 62 MiB used, 160 GiB / 160 GiB avail; 170 KiB/s rd, 682 B/s wr, 294 op/s 2026-03-10T13:07:25.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:25 vm00 ceph-mon[51670]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/2653325477"}]': finished 2026-03-10T13:07:25.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:25 vm00 ceph-mon[51670]: osdmap e63: 8 total, 8 up, 8 in 2026-03-10T13:07:25.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:25 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/1693364658' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/2653325477"}]: dispatch 2026-03-10T13:07:25.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:25 vm00 ceph-mon[51670]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/2653325477"}]: dispatch 2026-03-10T13:07:25.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:25 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:25.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:25 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:25.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:25 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:25.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:25 vm08 ceph-mon[49535]: pgmap v42: 161 pgs: 161 active+clean; 457 KiB data, 62 MiB used, 160 GiB / 160 GiB avail; 170 KiB/s rd, 682 B/s wr, 294 op/s 2026-03-10T13:07:25.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:25 vm08 ceph-mon[49535]: from='client.? 
' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/2653325477"}]': finished 2026-03-10T13:07:25.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:25 vm08 ceph-mon[49535]: osdmap e63: 8 total, 8 up, 8 in 2026-03-10T13:07:25.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:25 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/1693364658' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/2653325477"}]: dispatch 2026-03-10T13:07:25.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:25 vm08 ceph-mon[49535]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/2653325477"}]: dispatch 2026-03-10T13:07:25.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:25 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:25.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:25 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:25.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:25 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:07:26.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:26 vm00 ceph-mon[47364]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/2653325477"}]': finished 2026-03-10T13:07:26.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:26 vm00 ceph-mon[47364]: osdmap e64: 8 total, 8 up, 8 in 2026-03-10T13:07:26.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:26 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/3840022045' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/4037273490"}]: dispatch 2026-03-10T13:07:26.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:26 vm00 ceph-mon[51670]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/2653325477"}]': finished 2026-03-10T13:07:26.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:26 vm00 ceph-mon[51670]: osdmap e64: 8 total, 8 up, 8 in 2026-03-10T13:07:26.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:26 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/3840022045' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/4037273490"}]: dispatch 2026-03-10T13:07:26.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:26 vm08 ceph-mon[49535]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/2653325477"}]': finished 2026-03-10T13:07:26.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:26 vm08 ceph-mon[49535]: osdmap e64: 8 total, 8 up, 8 in 2026-03-10T13:07:26.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:26 vm08 ceph-mon[49535]: from='client.? 
192.168.123.100:0/3840022045' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/4037273490"}]: dispatch 2026-03-10T13:07:27.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:27 vm00 ceph-mon[47364]: pgmap v45: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 9.2 KiB/s rd, 9 op/s 2026-03-10T13:07:27.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:27 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/3840022045' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/4037273490"}]': finished 2026-03-10T13:07:27.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:27 vm00 ceph-mon[47364]: osdmap e65: 8 total, 8 up, 8 in 2026-03-10T13:07:27.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:27 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/2923971904' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/1486888834"}]: dispatch 2026-03-10T13:07:27.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:27 vm00 ceph-mon[51670]: pgmap v45: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 9.2 KiB/s rd, 9 op/s 2026-03-10T13:07:27.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:27 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/3840022045' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/4037273490"}]': finished 2026-03-10T13:07:27.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:27 vm00 ceph-mon[51670]: osdmap e65: 8 total, 8 up, 8 in 2026-03-10T13:07:27.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:27 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/2923971904' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/1486888834"}]: dispatch 2026-03-10T13:07:27.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:27 vm08 ceph-mon[49535]: pgmap v45: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 9.2 KiB/s rd, 9 op/s 2026-03-10T13:07:27.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:27 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/3840022045' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/4037273490"}]': finished 2026-03-10T13:07:27.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:27 vm08 ceph-mon[49535]: osdmap e65: 8 total, 8 up, 8 in 2026-03-10T13:07:27.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:27 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/2923971904' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/1486888834"}]: dispatch 2026-03-10T13:07:28.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:28 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/2923971904' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/1486888834"}]': finished 2026-03-10T13:07:28.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:28 vm00 ceph-mon[47364]: osdmap e66: 8 total, 8 up, 8 in 2026-03-10T13:07:28.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:28 vm00 ceph-mon[47364]: from='client.? 
192.168.123.100:0/433984645' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/4178613541"}]: dispatch 2026-03-10T13:07:28.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:28 vm00 ceph-mon[47364]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/4178613541"}]: dispatch 2026-03-10T13:07:28.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:28 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/2923971904' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/1486888834"}]': finished 2026-03-10T13:07:28.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:28 vm00 ceph-mon[51670]: osdmap e66: 8 total, 8 up, 8 in 2026-03-10T13:07:28.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:28 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/433984645' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/4178613541"}]: dispatch 2026-03-10T13:07:28.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:28 vm00 ceph-mon[51670]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/4178613541"}]: dispatch 2026-03-10T13:07:28.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:28 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/2923971904' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/1486888834"}]': finished 2026-03-10T13:07:28.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:28 vm08 ceph-mon[49535]: osdmap e66: 8 total, 8 up, 8 in 2026-03-10T13:07:28.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:28 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/433984645' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/4178613541"}]: dispatch 2026-03-10T13:07:28.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:28 vm08 ceph-mon[49535]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/4178613541"}]: dispatch 2026-03-10T13:07:29.586 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:29 vm00 ceph-mon[47364]: pgmap v48: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 9.2 KiB/s rd, 9 op/s 2026-03-10T13:07:29.586 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:29 vm00 ceph-mon[47364]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/4178613541"}]': finished 2026-03-10T13:07:29.586 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:29 vm00 ceph-mon[47364]: osdmap e67: 8 total, 8 up, 8 in 2026-03-10T13:07:29.586 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:29 vm00 ceph-mon[47364]: from='client.? 
192.168.123.100:0/1056708764' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2030610713"}]: dispatch 2026-03-10T13:07:29.586 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:29 vm00 ceph-mon[51670]: pgmap v48: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 9.2 KiB/s rd, 9 op/s 2026-03-10T13:07:29.586 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:29 vm00 ceph-mon[51670]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/4178613541"}]': finished 2026-03-10T13:07:29.586 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:29 vm00 ceph-mon[51670]: osdmap e67: 8 total, 8 up, 8 in 2026-03-10T13:07:29.586 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:29 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/1056708764' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2030610713"}]: dispatch 2026-03-10T13:07:29.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:29 vm08 ceph-mon[49535]: pgmap v48: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 9.2 KiB/s rd, 9 op/s 2026-03-10T13:07:29.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:29 vm08 ceph-mon[49535]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/4178613541"}]': finished 2026-03-10T13:07:29.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:29 vm08 ceph-mon[49535]: osdmap e67: 8 total, 8 up, 8 in 2026-03-10T13:07:29.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:29 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/1056708764' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2030610713"}]: dispatch 2026-03-10T13:07:30.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:30 vm00 ceph-mon[47364]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:07:30.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:30 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/1056708764' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2030610713"}]': finished 2026-03-10T13:07:30.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:30 vm00 ceph-mon[47364]: osdmap e68: 8 total, 8 up, 8 in 2026-03-10T13:07:30.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:30 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/1392771190' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1209684253"}]: dispatch 2026-03-10T13:07:30.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:30 vm00 ceph-mon[47364]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1209684253"}]: dispatch 2026-03-10T13:07:30.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:30 vm00 ceph-mon[51670]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:07:30.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:30 vm00 ceph-mon[51670]: from='client.? 
192.168.123.100:0/1056708764' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2030610713"}]': finished 2026-03-10T13:07:30.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:30 vm00 ceph-mon[51670]: osdmap e68: 8 total, 8 up, 8 in 2026-03-10T13:07:30.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:30 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/1392771190' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1209684253"}]: dispatch 2026-03-10T13:07:30.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:30 vm00 ceph-mon[51670]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1209684253"}]: dispatch 2026-03-10T13:07:30.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:30 vm08 ceph-mon[49535]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:07:30.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:30 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/1056708764' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2030610713"}]': finished 2026-03-10T13:07:30.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:30 vm08 ceph-mon[49535]: osdmap e68: 8 total, 8 up, 8 in 2026-03-10T13:07:30.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:30 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/1392771190' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1209684253"}]: dispatch 2026-03-10T13:07:30.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:30 vm08 ceph-mon[49535]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1209684253"}]: dispatch 2026-03-10T13:07:31.462 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:07:31 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=info ts=2026-03-10T13:07:31.067Z caller=cluster.go:688 component=cluster msg="gossip settled; proceeding" elapsed=10.004872383s 2026-03-10T13:07:31.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:31 vm00 ceph-mon[47364]: pgmap v51: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:07:31.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:31 vm00 ceph-mon[47364]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1209684253"}]': finished 2026-03-10T13:07:31.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:31 vm00 ceph-mon[47364]: osdmap e69: 8 total, 8 up, 8 in 2026-03-10T13:07:31.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:31 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/2412437448' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1869727854"}]: dispatch 2026-03-10T13:07:31.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:31 vm00 ceph-mon[51670]: pgmap v51: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:07:31.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:31 vm00 ceph-mon[51670]: from='client.? 
' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1209684253"}]': finished 2026-03-10T13:07:31.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:31 vm00 ceph-mon[51670]: osdmap e69: 8 total, 8 up, 8 in 2026-03-10T13:07:31.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:31 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/2412437448' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1869727854"}]: dispatch 2026-03-10T13:07:31.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:31 vm08 ceph-mon[49535]: pgmap v51: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:07:31.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:31 vm08 ceph-mon[49535]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1209684253"}]': finished 2026-03-10T13:07:31.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:31 vm08 ceph-mon[49535]: osdmap e69: 8 total, 8 up, 8 in 2026-03-10T13:07:31.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:31 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/2412437448' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1869727854"}]: dispatch 2026-03-10T13:07:33.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:32 vm00 ceph-mon[47364]: pgmap v53: 161 pgs: 161 active+clean; 457 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:07:33.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:32 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/2412437448' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1869727854"}]': finished 2026-03-10T13:07:33.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:32 vm00 ceph-mon[47364]: osdmap e70: 8 total, 8 up, 8 in 2026-03-10T13:07:33.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:32 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/2712913988' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/6955224"}]: dispatch 2026-03-10T13:07:33.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:32 vm00 ceph-mon[51670]: pgmap v53: 161 pgs: 161 active+clean; 457 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:07:33.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:32 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/2412437448' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1869727854"}]': finished 2026-03-10T13:07:33.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:32 vm00 ceph-mon[51670]: osdmap e70: 8 total, 8 up, 8 in 2026-03-10T13:07:33.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:32 vm00 ceph-mon[51670]: from='client.? 
192.168.123.100:0/2712913988' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/6955224"}]: dispatch 2026-03-10T13:07:33.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:32 vm08 ceph-mon[49535]: pgmap v53: 161 pgs: 161 active+clean; 457 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:07:33.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:32 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/2412437448' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1869727854"}]': finished 2026-03-10T13:07:33.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:32 vm08 ceph-mon[49535]: osdmap e70: 8 total, 8 up, 8 in 2026-03-10T13:07:33.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:32 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/2712913988' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/6955224"}]: dispatch 2026-03-10T13:07:33.503 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:07:33 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:07:33] "GET /metrics HTTP/1.1" 200 207662 "" "Prometheus/2.33.4" 2026-03-10T13:07:34.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:33 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/2712913988' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/6955224"}]': finished 2026-03-10T13:07:34.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:33 vm00 ceph-mon[47364]: osdmap e71: 8 total, 8 up, 8 in 2026-03-10T13:07:34.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:33 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/2849063839' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/1486888834"}]: dispatch 2026-03-10T13:07:34.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:33 vm00 ceph-mon[47364]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/1486888834"}]: dispatch 2026-03-10T13:07:34.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:33 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/2712913988' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/6955224"}]': finished 2026-03-10T13:07:34.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:33 vm00 ceph-mon[51670]: osdmap e71: 8 total, 8 up, 8 in 2026-03-10T13:07:34.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:33 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/2849063839' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/1486888834"}]: dispatch 2026-03-10T13:07:34.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:33 vm00 ceph-mon[51670]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/1486888834"}]: dispatch 2026-03-10T13:07:34.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:33 vm08 ceph-mon[49535]: from='client.? 
192.168.123.100:0/2712913988' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/6955224"}]': finished 2026-03-10T13:07:34.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:33 vm08 ceph-mon[49535]: osdmap e71: 8 total, 8 up, 8 in 2026-03-10T13:07:34.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:33 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/2849063839' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/1486888834"}]: dispatch 2026-03-10T13:07:34.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:33 vm08 ceph-mon[49535]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/1486888834"}]: dispatch 2026-03-10T13:07:35.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:34 vm00 ceph-mon[47364]: pgmap v56: 161 pgs: 161 active+clean; 457 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:07:35.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:34 vm00 ceph-mon[47364]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/1486888834"}]': finished 2026-03-10T13:07:35.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:34 vm00 ceph-mon[47364]: osdmap e72: 8 total, 8 up, 8 in 2026-03-10T13:07:35.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:34 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/2415968649' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2363309494"}]: dispatch 2026-03-10T13:07:35.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:34 vm00 ceph-mon[47364]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2363309494"}]: dispatch 2026-03-10T13:07:35.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:34 vm00 ceph-mon[51670]: pgmap v56: 161 pgs: 161 active+clean; 457 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:07:35.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:34 vm00 ceph-mon[51670]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/1486888834"}]': finished 2026-03-10T13:07:35.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:34 vm00 ceph-mon[51670]: osdmap e72: 8 total, 8 up, 8 in 2026-03-10T13:07:35.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:34 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/2415968649' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2363309494"}]: dispatch 2026-03-10T13:07:35.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:34 vm00 ceph-mon[51670]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2363309494"}]: dispatch 2026-03-10T13:07:35.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:34 vm08 ceph-mon[49535]: pgmap v56: 161 pgs: 161 active+clean; 457 KiB data, 68 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:07:35.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:34 vm08 ceph-mon[49535]: from='client.? 
' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/1486888834"}]': finished 2026-03-10T13:07:35.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:34 vm08 ceph-mon[49535]: osdmap e72: 8 total, 8 up, 8 in 2026-03-10T13:07:35.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:34 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/2415968649' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2363309494"}]: dispatch 2026-03-10T13:07:35.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:34 vm08 ceph-mon[49535]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2363309494"}]: dispatch 2026-03-10T13:07:36.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:35 vm00 ceph-mon[47364]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2363309494"}]': finished 2026-03-10T13:07:36.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:35 vm00 ceph-mon[47364]: osdmap e73: 8 total, 8 up, 8 in 2026-03-10T13:07:36.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:35 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/2308244327' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/6955224"}]: dispatch 2026-03-10T13:07:36.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:35 vm00 ceph-mon[51670]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2363309494"}]': finished 2026-03-10T13:07:36.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:35 vm00 ceph-mon[51670]: osdmap e73: 8 total, 8 up, 8 in 2026-03-10T13:07:36.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:35 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/2308244327' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/6955224"}]: dispatch 2026-03-10T13:07:36.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:35 vm08 ceph-mon[49535]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2363309494"}]': finished 2026-03-10T13:07:36.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:35 vm08 ceph-mon[49535]: osdmap e73: 8 total, 8 up, 8 in 2026-03-10T13:07:36.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:35 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/2308244327' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/6955224"}]: dispatch 2026-03-10T13:07:36.770 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:07:36 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[50837]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:07:36] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-10T13:07:36.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:36 vm08 ceph-mon[49535]: pgmap v59: 161 pgs: 161 active+clean; 457 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:07:36.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:36 vm08 ceph-mon[49535]: from='client.? 
192.168.123.100:0/2308244327' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/6955224"}]': finished 2026-03-10T13:07:36.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:36 vm08 ceph-mon[49535]: osdmap e74: 8 total, 8 up, 8 in 2026-03-10T13:07:36.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:36 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/2241962123' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/4024415478"}]: dispatch 2026-03-10T13:07:36.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:36 vm08 ceph-mon[49535]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/4024415478"}]: dispatch 2026-03-10T13:07:37.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:36 vm00 ceph-mon[47364]: pgmap v59: 161 pgs: 161 active+clean; 457 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:07:37.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:36 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/2308244327' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/6955224"}]': finished 2026-03-10T13:07:37.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:36 vm00 ceph-mon[47364]: osdmap e74: 8 total, 8 up, 8 in 2026-03-10T13:07:37.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:36 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/2241962123' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/4024415478"}]: dispatch 2026-03-10T13:07:37.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:36 vm00 ceph-mon[47364]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/4024415478"}]: dispatch 2026-03-10T13:07:37.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:36 vm00 ceph-mon[51670]: pgmap v59: 161 pgs: 161 active+clean; 457 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:07:37.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:36 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/2308244327' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/6955224"}]': finished 2026-03-10T13:07:37.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:36 vm00 ceph-mon[51670]: osdmap e74: 8 total, 8 up, 8 in 2026-03-10T13:07:37.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:36 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/2241962123' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/4024415478"}]: dispatch 2026-03-10T13:07:37.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:36 vm00 ceph-mon[51670]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/4024415478"}]: dispatch 2026-03-10T13:07:38.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:37 vm00 ceph-mon[47364]: from='client.? 
' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/4024415478"}]': finished 2026-03-10T13:07:38.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:37 vm00 ceph-mon[47364]: osdmap e75: 8 total, 8 up, 8 in 2026-03-10T13:07:38.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:37 vm00 ceph-mon[51670]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/4024415478"}]': finished 2026-03-10T13:07:38.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:37 vm00 ceph-mon[51670]: osdmap e75: 8 total, 8 up, 8 in 2026-03-10T13:07:38.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:37 vm08 ceph-mon[49535]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/4024415478"}]': finished 2026-03-10T13:07:38.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:37 vm08 ceph-mon[49535]: osdmap e75: 8 total, 8 up, 8 in 2026-03-10T13:07:39.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:38 vm00 ceph-mon[47364]: pgmap v62: 161 pgs: 161 active+clean; 457 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:07:39.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:38 vm00 ceph-mon[51670]: pgmap v62: 161 pgs: 161 active+clean; 457 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:07:39.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:38 vm08 ceph-mon[49535]: pgmap v62: 161 pgs: 161 active+clean; 457 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:07:40.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:40 vm00 ceph-mon[47364]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:07:40.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:40 vm00 ceph-mon[51670]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:07:40.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:40 vm08 ceph-mon[49535]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:07:41.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:41 vm00 ceph-mon[47364]: pgmap v63: 161 pgs: 161 active+clean; 457 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 856 B/s rd, 0 op/s 2026-03-10T13:07:41.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:41 vm00 ceph-mon[51670]: pgmap v63: 161 pgs: 161 active+clean; 457 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 856 B/s rd, 0 op/s 2026-03-10T13:07:41.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:41 vm08 ceph-mon[49535]: pgmap v63: 161 pgs: 161 active+clean; 457 KiB data, 69 MiB used, 160 GiB / 160 GiB avail; 856 B/s rd, 0 op/s 2026-03-10T13:07:43.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:43 vm00 ceph-mon[47364]: pgmap v64: 161 pgs: 161 active+clean; 457 KiB data, 70 MiB used, 160 GiB / 160 GiB avail; 736 B/s rd, 0 op/s 2026-03-10T13:07:43.503 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:07:43 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:07:43] "GET /metrics HTTP/1.1" 200 207662 "" "Prometheus/2.33.4" 2026-03-10T13:07:43.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:43 vm00 
ceph-mon[51670]: pgmap v64: 161 pgs: 161 active+clean; 457 KiB data, 70 MiB used, 160 GiB / 160 GiB avail; 736 B/s rd, 0 op/s 2026-03-10T13:07:43.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:43 vm08 ceph-mon[49535]: pgmap v64: 161 pgs: 161 active+clean; 457 KiB data, 70 MiB used, 160 GiB / 160 GiB avail; 736 B/s rd, 0 op/s 2026-03-10T13:07:44.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:44 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.8", "id": [7, 2]}]: dispatch 2026-03-10T13:07:44.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:44 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.e", "id": [1, 2]}]: dispatch 2026-03-10T13:07:44.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:44 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.f", "id": [1, 5]}]: dispatch 2026-03-10T13:07:44.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:44 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.8", "id": [7, 2]}]: dispatch 2026-03-10T13:07:44.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:44 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.10", "id": [1, 2]}]: dispatch 2026-03-10T13:07:44.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:44 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.e", "id": [1, 2]}]: dispatch 2026-03-10T13:07:44.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:44 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.f", "id": [1, 5]}]: dispatch 2026-03-10T13:07:44.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:44 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.10", "id": [1, 2]}]: dispatch 2026-03-10T13:07:44.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:44 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T13:07:44.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:44 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T13:07:44.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:44 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T13:07:44.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:44 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T13:07:44.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:44 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.8", "id": [7, 2]}]: dispatch 
2026-03-10T13:07:44.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:44 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.e", "id": [1, 2]}]: dispatch 2026-03-10T13:07:44.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:44 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.f", "id": [1, 5]}]: dispatch 2026-03-10T13:07:44.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:44 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.8", "id": [7, 2]}]: dispatch 2026-03-10T13:07:44.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:44 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.10", "id": [1, 2]}]: dispatch 2026-03-10T13:07:44.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:44 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.e", "id": [1, 2]}]: dispatch 2026-03-10T13:07:44.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:44 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.f", "id": [1, 5]}]: dispatch 2026-03-10T13:07:44.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:44 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.10", "id": [1, 2]}]: dispatch 2026-03-10T13:07:44.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:44 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T13:07:44.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:44 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T13:07:44.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:44 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T13:07:44.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:44 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T13:07:44.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:44 vm08 ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.8", "id": [7, 2]}]: dispatch 2026-03-10T13:07:44.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:44 vm08 ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.e", "id": [1, 2]}]: dispatch 2026-03-10T13:07:44.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:44 vm08 ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.f", "id": [1, 5]}]: dispatch 2026-03-10T13:07:44.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:44 vm08 
ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.8", "id": [7, 2]}]: dispatch 2026-03-10T13:07:44.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:44 vm08 ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.10", "id": [1, 2]}]: dispatch 2026-03-10T13:07:44.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:44 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.e", "id": [1, 2]}]: dispatch 2026-03-10T13:07:44.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:44 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.f", "id": [1, 5]}]: dispatch 2026-03-10T13:07:44.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:44 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.10", "id": [1, 2]}]: dispatch 2026-03-10T13:07:44.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:44 vm08 ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T13:07:44.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:44 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T13:07:44.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:44 vm08 ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T13:07:44.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:44 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T13:07:45.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:45 vm00 ceph-mon[47364]: pgmap v65: 161 pgs: 161 active+clean; 457 KiB data, 70 MiB used, 160 GiB / 160 GiB avail; 639 B/s rd, 0 op/s 2026-03-10T13:07:45.507 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:45 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.8", "id": [7, 2]}]': finished 2026-03-10T13:07:45.507 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:45 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.e", "id": [1, 2]}]': finished 2026-03-10T13:07:45.507 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:45 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.f", "id": [1, 5]}]': finished 2026-03-10T13:07:45.507 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:45 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.10", "id": [1, 2]}]': finished 2026-03-10T13:07:45.507 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:45 vm00 ceph-mon[47364]: osdmap e76: 8 total, 8 up, 8 in 2026-03-10T13:07:45.507 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:45 vm00 ceph-mon[51670]: pgmap v65: 161 pgs: 161 active+clean; 457 KiB data, 70 MiB used, 160 GiB / 160 GiB avail; 
639 B/s rd, 0 op/s 2026-03-10T13:07:45.507 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:45 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.8", "id": [7, 2]}]': finished 2026-03-10T13:07:45.507 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:45 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.e", "id": [1, 2]}]': finished 2026-03-10T13:07:45.507 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:45 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.f", "id": [1, 5]}]': finished 2026-03-10T13:07:45.507 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:45 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.10", "id": [1, 2]}]': finished 2026-03-10T13:07:45.507 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:45 vm00 ceph-mon[51670]: osdmap e76: 8 total, 8 up, 8 in 2026-03-10T13:07:45.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:45 vm08 ceph-mon[49535]: pgmap v65: 161 pgs: 161 active+clean; 457 KiB data, 70 MiB used, 160 GiB / 160 GiB avail; 639 B/s rd, 0 op/s 2026-03-10T13:07:45.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:45 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.8", "id": [7, 2]}]': finished 2026-03-10T13:07:45.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:45 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.e", "id": [1, 2]}]': finished 2026-03-10T13:07:45.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:45 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.f", "id": [1, 5]}]': finished 2026-03-10T13:07:45.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:45 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.10", "id": [1, 2]}]': finished 2026-03-10T13:07:45.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:45 vm08 ceph-mon[49535]: osdmap e76: 8 total, 8 up, 8 in 2026-03-10T13:07:46.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:46 vm00 ceph-mon[47364]: osdmap e77: 8 total, 8 up, 8 in 2026-03-10T13:07:46.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:46 vm00 ceph-mon[51670]: osdmap e77: 8 total, 8 up, 8 in 2026-03-10T13:07:46.770 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:07:46 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[50837]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:07:46] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-10T13:07:46.773 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:46 vm08 ceph-mon[49535]: osdmap e77: 8 total, 8 up, 8 in 2026-03-10T13:07:48.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:47 vm00 ceph-mon[47364]: pgmap v68: 161 pgs: 4 peering, 157 active+clean; 457 KiB data, 70 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:07:48.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:47 vm00 ceph-mon[51670]: pgmap v68: 161 pgs: 4 peering, 157 active+clean; 457 KiB data, 70 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:07:48.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:47 vm08 ceph-mon[49535]: pgmap v68: 
161 pgs: 4 peering, 157 active+clean; 457 KiB data, 70 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:07:49.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:48 vm00 ceph-mon[47364]: pgmap v69: 161 pgs: 4 peering, 157 active+clean; 457 KiB data, 70 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:07:49.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:48 vm00 ceph-mon[51670]: pgmap v69: 161 pgs: 4 peering, 157 active+clean; 457 KiB data, 70 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:07:49.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:48 vm08 ceph-mon[49535]: pgmap v69: 161 pgs: 4 peering, 157 active+clean; 457 KiB data, 70 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:07:50.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:49 vm00 ceph-mon[47364]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:07:50.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:49 vm00 ceph-mon[51670]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:07:50.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:49 vm08 ceph-mon[49535]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:07:51.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:50 vm00 ceph-mon[47364]: pgmap v70: 161 pgs: 4 peering, 157 active+clean; 457 KiB data, 70 MiB used, 160 GiB / 160 GiB avail; 639 B/s rd, 0 op/s 2026-03-10T13:07:51.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:50 vm00 ceph-mon[51670]: pgmap v70: 161 pgs: 4 peering, 157 active+clean; 457 KiB data, 70 MiB used, 160 GiB / 160 GiB avail; 639 B/s rd, 0 op/s 2026-03-10T13:07:51.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:50 vm08 ceph-mon[49535]: pgmap v70: 161 pgs: 4 peering, 157 active+clean; 457 KiB data, 70 MiB used, 160 GiB / 160 GiB avail; 639 B/s rd, 0 op/s 2026-03-10T13:07:53.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:52 vm00 ceph-mon[47364]: pgmap v71: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s; 0 B/s, 0 objects/s recovering 2026-03-10T13:07:53.253 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:07:53 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:07:53] "GET /metrics HTTP/1.1" 200 207632 "" "Prometheus/2.33.4" 2026-03-10T13:07:53.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:52 vm00 ceph-mon[51670]: pgmap v71: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s; 0 B/s, 0 objects/s recovering 2026-03-10T13:07:53.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:52 vm08 ceph-mon[49535]: pgmap v71: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s; 0 B/s, 0 objects/s recovering 2026-03-10T13:07:55.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:55 vm00 ceph-mon[47364]: pgmap v72: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s; 0 B/s, 0 objects/s recovering 2026-03-10T13:07:55.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:55 vm00 ceph-mon[51670]: pgmap v72: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s; 0 B/s, 
0 objects/s recovering 2026-03-10T13:07:55.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:55 vm08 ceph-mon[49535]: pgmap v72: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s; 0 B/s, 0 objects/s recovering 2026-03-10T13:07:56.770 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:07:56 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[50837]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:07:56] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-10T13:07:57.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:57 vm00 ceph-mon[47364]: pgmap v73: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 981 B/s rd, 0 op/s; 0 B/s, 0 objects/s recovering 2026-03-10T13:07:57.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:57 vm00 ceph-mon[51670]: pgmap v73: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 981 B/s rd, 0 op/s; 0 B/s, 0 objects/s recovering 2026-03-10T13:07:57.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:57 vm08 ceph-mon[49535]: pgmap v73: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 981 B/s rd, 0 op/s; 0 B/s, 0 objects/s recovering 2026-03-10T13:07:59.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:07:59 vm00 ceph-mon[47364]: pgmap v74: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s; 0 B/s, 0 objects/s recovering 2026-03-10T13:07:59.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:07:59 vm00 ceph-mon[51670]: pgmap v74: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s; 0 B/s, 0 objects/s recovering 2026-03-10T13:07:59.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:07:59 vm08 ceph-mon[49535]: pgmap v74: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s; 0 B/s, 0 objects/s recovering 2026-03-10T13:08:00.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:08:00 vm00 ceph-mon[47364]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:08:00.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:08:00 vm00 ceph-mon[51670]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:08:00.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:08:00 vm08 ceph-mon[49535]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:08:01.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:08:01 vm00 ceph-mon[47364]: pgmap v75: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s; 0 B/s, 0 objects/s recovering 2026-03-10T13:08:01.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:08:01 vm00 ceph-mon[51670]: pgmap v75: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s; 0 B/s, 0 objects/s recovering 2026-03-10T13:08:01.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:08:01 vm08 ceph-mon[49535]: pgmap v75: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s; 0 B/s, 0 objects/s recovering 2026-03-10T13:08:03.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:08:03 vm00 ceph-mon[47364]: pgmap v76: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 
1 op/s; 0 B/s, 0 objects/s recovering 2026-03-10T13:08:03.503 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:08:03 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:08:03] "GET /metrics HTTP/1.1" 200 207607 "" "Prometheus/2.33.4" 2026-03-10T13:08:03.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:08:03 vm00 ceph-mon[51670]: pgmap v76: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s; 0 B/s, 0 objects/s recovering 2026-03-10T13:08:03.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:08:03 vm08 ceph-mon[49535]: pgmap v76: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s; 0 B/s, 0 objects/s recovering 2026-03-10T13:08:05.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:08:05 vm00 ceph-mon[47364]: pgmap v77: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:08:05.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:08:05 vm00 ceph-mon[51670]: pgmap v77: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:08:05.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:08:05 vm08 ceph-mon[49535]: pgmap v77: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:08:06.770 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:08:06 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[50837]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:08:06] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-10T13:08:07.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:08:07 vm00 ceph-mon[47364]: pgmap v78: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:08:07.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:08:07 vm00 ceph-mon[51670]: pgmap v78: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:08:07.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:08:07 vm08 ceph-mon[49535]: pgmap v78: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:08:09.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:08:09 vm00 ceph-mon[47364]: pgmap v79: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:08:09.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:08:09 vm00 ceph-mon[51670]: pgmap v79: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:08:09.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:08:09 vm08 ceph-mon[49535]: pgmap v79: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:08:10.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:08:10 vm00 ceph-mon[47364]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:08:10.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:08:10 vm00 ceph-mon[51670]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:08:10.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:08:10 vm08 ceph-mon[49535]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", 
"format": "json"}]: dispatch 2026-03-10T13:08:11.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:08:11 vm08 ceph-mon[49535]: pgmap v80: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:08:11.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:08:11 vm00 ceph-mon[47364]: pgmap v80: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:08:11.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:08:11 vm00 ceph-mon[51670]: pgmap v80: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:08:13.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:08:13 vm00 ceph-mon[47364]: pgmap v81: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:08:13.503 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:08:13 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:08:13] "GET /metrics HTTP/1.1" 200 207607 "" "Prometheus/2.33.4" 2026-03-10T13:08:13.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:08:13 vm00 ceph-mon[51670]: pgmap v81: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:08:13.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:08:13 vm08 ceph-mon[49535]: pgmap v81: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:08:15.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:08:15 vm00 ceph-mon[47364]: pgmap v82: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:08:15.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:08:15 vm00 ceph-mon[51670]: pgmap v82: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:08:15.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:08:15 vm08 ceph-mon[49535]: pgmap v82: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:08:16.770 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:08:16 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[50837]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:08:16] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-10T13:08:17.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:08:17 vm00 ceph-mon[47364]: pgmap v83: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:08:17.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:08:17 vm00 ceph-mon[51670]: pgmap v83: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:08:17.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:08:17 vm08 ceph-mon[49535]: pgmap v83: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:08:19.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:08:19 vm00 ceph-mon[47364]: pgmap v84: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:08:19.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:08:19 vm00 ceph-mon[51670]: pgmap v84: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:08:19.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:08:19 
vm08 ceph-mon[49535]: pgmap v84: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:08:20.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:08:20 vm00 ceph-mon[47364]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:08:20.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:08:20 vm00 ceph-mon[51670]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:08:20.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:08:20 vm08 ceph-mon[49535]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:08:21.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:08:21 vm00 ceph-mon[47364]: pgmap v85: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:08:21.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:08:21 vm00 ceph-mon[51670]: pgmap v85: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:08:21.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:08:21 vm08 ceph-mon[49535]: pgmap v85: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:08:23.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:08:23 vm00 ceph-mon[47364]: pgmap v86: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:08:23.502 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:08:23 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:08:23] "GET /metrics HTTP/1.1" 200 207607 "" "Prometheus/2.33.4" 2026-03-10T13:08:23.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:08:23 vm00 ceph-mon[51670]: pgmap v86: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:08:23.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:08:23 vm08 ceph-mon[49535]: pgmap v86: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:08:25.331 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:08:25 vm08 ceph-mon[49535]: pgmap v87: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:08:25.331 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:08:25 vm08 ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:08:25.331 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:08:25 vm08 ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:08:25.331 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:08:25 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:08:25.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:08:25 vm00 ceph-mon[47364]: pgmap v87: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:08:25.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:08:25 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 
2026-03-10T13:08:25.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:08:25 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:08:25.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:08:25 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:08:25.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:08:25 vm00 ceph-mon[51670]: pgmap v87: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:08:25.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:08:25 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:08:25.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:08:25 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:08:25.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:08:25 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:08:26.770 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:08:26 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[50837]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:08:26] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-10T13:08:27.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:08:27 vm00 ceph-mon[47364]: pgmap v88: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:08:27.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:08:27 vm00 ceph-mon[51670]: pgmap v88: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:08:27.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:08:27 vm08 ceph-mon[49535]: pgmap v88: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:08:29.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:08:29 vm00 ceph-mon[47364]: pgmap v89: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:08:29.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:08:29 vm00 ceph-mon[51670]: pgmap v89: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:08:29.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:08:29 vm08 ceph-mon[49535]: pgmap v89: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:08:30.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:08:30 vm00 ceph-mon[47364]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:08:30.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:08:30 vm00 ceph-mon[51670]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:08:30.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:08:30 vm08 ceph-mon[49535]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:08:31.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:08:31 vm00 ceph-mon[47364]: pgmap v90: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 
2026-03-10T13:08:31.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:08:31 vm00 ceph-mon[51670]: pgmap v90: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:08:31.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:08:31 vm08 ceph-mon[49535]: pgmap v90: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:08:33.352 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:08:33 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:08:33] "GET /metrics HTTP/1.1" 200 207589 "" "Prometheus/2.33.4" 2026-03-10T13:08:33.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:08:33 vm00 ceph-mon[47364]: pgmap v91: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:08:33.753 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:08:33 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:08:33.514Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:08:33.753 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:08:33 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:08:33.514Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:08:33.753 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:08:33 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:08:33.516Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:08:33.753 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:08:33 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:08:33.516Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:08:33.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:08:33 vm00 ceph-mon[51670]: pgmap v91: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:08:33.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:08:33 vm08 ceph-mon[49535]: pgmap v91: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:08:35.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:08:35 vm00 ceph-mon[47364]: pgmap v92: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 
2026-03-10T13:08:35.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:08:35 vm00 ceph-mon[51670]: pgmap v92: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:08:35.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:08:35 vm08 ceph-mon[49535]: pgmap v92: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:08:36.770 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:08:36 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[50837]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:08:36] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-10T13:08:37.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:08:37 vm00 ceph-mon[47364]: pgmap v93: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:08:37.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:08:37 vm00 ceph-mon[51670]: pgmap v93: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:08:37.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:08:37 vm08 ceph-mon[49535]: pgmap v93: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:08:39.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:08:39 vm00 ceph-mon[47364]: pgmap v94: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:08:39.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:08:39 vm00 ceph-mon[51670]: pgmap v94: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:08:39.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:08:39 vm08 ceph-mon[49535]: pgmap v94: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:08:40.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:08:40 vm00 ceph-mon[47364]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:08:40.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:08:40 vm00 ceph-mon[51670]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:08:40.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:08:40 vm08 ceph-mon[49535]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:08:41.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:08:41 vm00 ceph-mon[47364]: pgmap v95: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:08:41.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:08:41 vm00 ceph-mon[51670]: pgmap v95: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:08:41.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:08:41 vm08 ceph-mon[49535]: pgmap v95: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:08:43.411 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:08:43 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:08:43] "GET /metrics HTTP/1.1" 200 207589 "" "Prometheus/2.33.4" 2026-03-10T13:08:43.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 
13:08:43 vm00 ceph-mon[47364]: pgmap v96: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:08:43.753 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:08:43 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=error ts=2026-03-10T13:08:43.505Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:08:43.753 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:08:43 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=error ts=2026-03-10T13:08:43.505Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:08:43.753 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:08:43 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:08:43.508Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:08:43.753 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:08:43 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:08:43.508Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:08:43.753 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:08:43 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:08:43.509Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:08:43.753 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:08:43 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:08:43.510Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": 
x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:08:43.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:08:43 vm00 ceph-mon[51670]: pgmap v96: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:08:43.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:08:43 vm08 ceph-mon[49535]: pgmap v96: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:08:44.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:08:44 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T13:08:44.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:08:44 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T13:08:44.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:08:44 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T13:08:44.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:08:44 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T13:08:44.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:08:44 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T13:08:44.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:08:44 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T13:08:44.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:08:44 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T13:08:44.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:08:44 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T13:08:44.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:08:44 vm08 ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T13:08:44.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:08:44 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T13:08:44.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:08:44 vm08 ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T13:08:44.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:08:44 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T13:08:45.752 
INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:08:45 vm00 ceph-mon[47364]: pgmap v97: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:08:45.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:08:45 vm00 ceph-mon[51670]: pgmap v97: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:08:45.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:08:45 vm08 ceph-mon[49535]: pgmap v97: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:08:46.770 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:08:46 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[50837]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:08:46] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-10T13:08:47.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:08:47 vm00 ceph-mon[47364]: pgmap v98: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:08:47.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:08:47 vm00 ceph-mon[51670]: pgmap v98: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:08:47.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:08:47 vm08 ceph-mon[49535]: pgmap v98: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:08:49.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:08:49 vm00 ceph-mon[47364]: pgmap v99: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:08:49.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:08:49 vm00 ceph-mon[51670]: pgmap v99: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:08:49.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:08:49 vm08 ceph-mon[49535]: pgmap v99: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:08:50.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:08:50 vm00 ceph-mon[47364]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:08:50.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:08:50 vm00 ceph-mon[51670]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:08:50.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:08:50 vm08 ceph-mon[49535]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:08:51.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:08:51 vm00 ceph-mon[47364]: pgmap v100: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:08:51.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:08:51 vm00 ceph-mon[51670]: pgmap v100: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:08:51.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:08:51 vm08 ceph-mon[49535]: pgmap v100: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:08:53.252 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:08:53 vm00 
ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:08:53] "GET /metrics HTTP/1.1" 200 207610 "" "Prometheus/2.33.4" 2026-03-10T13:08:53.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:08:53 vm08 ceph-mon[49535]: pgmap v101: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:08:54.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:08:53 vm00 ceph-mon[47364]: pgmap v101: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:08:54.002 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:08:53 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=error ts=2026-03-10T13:08:53.506Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:08:54.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:08:53 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=error ts=2026-03-10T13:08:53.506Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:08:54.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:08:53 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:08:53.507Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:08:54.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:08:53 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:08:53.508Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:08:54.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:08:53 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:08:53.509Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because 
it doesn't contain any IP SANs" 2026-03-10T13:08:54.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:08:53 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:08:53.509Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:08:54.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:08:53 vm00 ceph-mon[51670]: pgmap v101: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:08:55.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:08:55 vm08 ceph-mon[49535]: pgmap v102: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:08:56.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:08:55 vm00 ceph-mon[47364]: pgmap v102: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:08:56.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:08:55 vm00 ceph-mon[51670]: pgmap v102: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:08:56.770 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:08:56 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[50837]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:08:56] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-10T13:08:58.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:08:57 vm00 ceph-mon[47364]: pgmap v103: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:08:58.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:08:57 vm00 ceph-mon[51670]: pgmap v103: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:08:58.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:08:57 vm08 ceph-mon[49535]: pgmap v103: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:08:59.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:08:59 vm00 ceph-mon[47364]: pgmap v104: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:08:59.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:08:59 vm00 ceph-mon[51670]: pgmap v104: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:09:00.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:08:59 vm08 ceph-mon[49535]: pgmap v104: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:09:01.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:09:00 vm00 ceph-mon[47364]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:09:01.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:09:00 vm00 ceph-mon[51670]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:09:01.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:09:00 vm08 ceph-mon[49535]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": 
"json"}]: dispatch 2026-03-10T13:09:02.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:09:01 vm00 ceph-mon[47364]: pgmap v105: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:09:02.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:09:01 vm00 ceph-mon[51670]: pgmap v105: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:09:02.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:09:01 vm08 ceph-mon[49535]: pgmap v105: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:09:03.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:09:02 vm00 ceph-mon[47364]: pgmap v106: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:09:03.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:09:02 vm00 ceph-mon[51670]: pgmap v106: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:09:03.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:09:02 vm08 ceph-mon[49535]: pgmap v106: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:09:03.502 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:09:03 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:09:03] "GET /metrics HTTP/1.1" 200 207621 "" "Prometheus/2.33.4" 2026-03-10T13:09:04.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:09:03 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=error ts=2026-03-10T13:09:03.506Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:09:04.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:09:03 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=error ts=2026-03-10T13:09:03.506Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:09:04.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:09:03 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:09:03.508Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:09:04.003 
INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:09:03 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:09:03.508Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:09:04.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:09:03 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:09:03.509Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:09:04.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:09:03 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:09:03.510Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:09:05.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:09:04 vm00 ceph-mon[47364]: pgmap v107: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:09:05.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:09:04 vm00 ceph-mon[51670]: pgmap v107: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:09:05.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:09:04 vm08 ceph-mon[49535]: pgmap v107: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:09:06.770 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:09:06 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[50837]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:09:06] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-10T13:09:07.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:09:06 vm00 ceph-mon[47364]: pgmap v108: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:09:07.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:09:06 vm00 ceph-mon[51670]: pgmap v108: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:09:07.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:09:06 vm08 ceph-mon[49535]: pgmap v108: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:09:09.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:09:08 vm00 ceph-mon[47364]: pgmap v109: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:09:09.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:09:08 vm00 ceph-mon[51670]: pgmap v109: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:09:09.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:09:08 vm08 ceph-mon[49535]: pgmap v109: 161 pgs: 
161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:09:10.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:09:09 vm00 ceph-mon[47364]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:09:10.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:09:09 vm00 ceph-mon[51670]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:09:10.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:09:09 vm08 ceph-mon[49535]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:09:11.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:09:10 vm00 ceph-mon[47364]: pgmap v110: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:09:11.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:09:10 vm00 ceph-mon[51670]: pgmap v110: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:09:11.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:09:10 vm08 ceph-mon[49535]: pgmap v110: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:09:13.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:09:12 vm00 ceph-mon[47364]: pgmap v111: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:09:13.252 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:09:13 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:09:13] "GET /metrics HTTP/1.1" 200 207621 "" "Prometheus/2.33.4" 2026-03-10T13:09:13.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:09:12 vm00 ceph-mon[51670]: pgmap v111: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:09:13.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:09:12 vm08 ceph-mon[49535]: pgmap v111: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:09:14.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:09:13 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=error ts=2026-03-10T13:09:13.507Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:09:14.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:09:13 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=error ts=2026-03-10T13:09:13.507Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs; 
ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:09:14.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:09:13 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:09:13.509Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:09:14.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:09:13 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:09:13.510Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:09:14.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:09:13 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:09:13.511Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:09:14.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:09:13 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:09:13.511Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:09:15.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:09:14 vm00 ceph-mon[47364]: pgmap v112: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:09:15.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:09:14 vm00 ceph-mon[51670]: pgmap v112: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:09:15.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:09:14 vm08 ceph-mon[49535]: pgmap v112: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:09:16.770 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:09:16 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[50837]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:09:16] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-10T13:09:17.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:09:16 vm00 ceph-mon[47364]: pgmap v113: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:09:17.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:09:16 vm00 ceph-mon[51670]: pgmap v113: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 
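(The recurring Alertmanager "Notify for alerts failed" / "Notify attempt failed, will retry later" entries above all have the same cause: the ceph-dashboard webhook receivers point at https://192.168.123.100:8443//api/prometheus_receiver and https://192.168.123.108:8443/api/prometheus_receiver, and the dashboard's self-signed certificate carries no IP Subject Alternative Names, so Alertmanager's x509 verification rejects the connection; note also the doubled slash in the webhook[0] URL. The lines below are a minimal, hypothetical sketch of how one could confirm and work around this on such a host, assuming OpenSSL 1.1.1+ is available; the 192.168.123.100 address and port 8443 come from the log, while the file names and the commented follow-up commands are illustrative only and are not part of this test run.)

# inspect the SANs the dashboard actually presents (expect no "IP Address:" entries here)
echo | openssl s_client -connect 192.168.123.100:8443 2>/dev/null \
  | openssl x509 -noout -text | grep -A1 'Subject Alternative Name'

# issue a self-signed certificate that does include the mgr host IP as a SAN
openssl req -x509 -newkey rsa:2048 -nodes -days 365 \
  -keyout dashboard.key -out dashboard.crt \
  -subj '/CN=ceph-dashboard' \
  -addext 'subjectAltName = IP:192.168.123.100'

# hypothetical follow-up: load the certificate into the dashboard and restart the active mgr
# (see the ceph-mgr dashboard documentation for the exact commands on the release in use)
# ceph dashboard set-ssl-certificate -i dashboard.crt
# ceph dashboard set-ssl-certificate-key -i dashboard.key
# ceph mgr fail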
2026-03-10T13:09:17.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:09:16 vm08 ceph-mon[49535]: pgmap v113: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:09:19.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:09:18 vm00 ceph-mon[47364]: pgmap v114: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:09:19.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:09:18 vm00 ceph-mon[51670]: pgmap v114: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:09:19.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:09:18 vm08 ceph-mon[49535]: pgmap v114: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:09:20.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:09:19 vm00 ceph-mon[47364]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:09:20.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:09:19 vm00 ceph-mon[51670]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:09:20.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:09:19 vm08 ceph-mon[49535]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:09:21.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:09:20 vm00 ceph-mon[47364]: pgmap v115: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:09:21.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:09:20 vm00 ceph-mon[51670]: pgmap v115: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:09:21.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:09:20 vm08 ceph-mon[49535]: pgmap v115: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:09:23.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:09:22 vm00 ceph-mon[47364]: pgmap v116: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:09:23.253 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:09:23 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:09:23] "GET /metrics HTTP/1.1" 200 207633 "" "Prometheus/2.33.4" 2026-03-10T13:09:23.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:09:22 vm00 ceph-mon[51670]: pgmap v116: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:09:23.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:09:22 vm08 ceph-mon[49535]: pgmap v116: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:09:24.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:09:23 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=error ts=2026-03-10T13:09:23.508Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP 
SANs; ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:09:24.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:09:23 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=error ts=2026-03-10T13:09:23.509Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:09:24.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:09:23 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:09:23.510Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:09:24.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:09:23 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:09:23.510Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:09:24.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:09:23 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:09:23.511Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:09:24.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:09:23 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:09:23.511Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:09:25.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:09:24 vm00 ceph-mon[47364]: pgmap v117: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:09:25.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:09:24 vm00 ceph-mon[51670]: pgmap v117: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:09:25.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:09:24 vm08 ceph-mon[49535]: pgmap v117: 161 pgs: 161 active+clean; 
457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:09:26.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:09:25 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:09:26.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:09:25 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:09:26.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:09:25 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:09:26.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:09:25 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:09:26.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:09:25 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:09:26.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:09:25 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:09:26.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:09:25 vm08 ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:09:26.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:09:25 vm08 ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:09:26.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:09:25 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:09:26.770 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:09:26 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[50837]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:09:26] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-10T13:09:28.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:09:27 vm00 ceph-mon[47364]: pgmap v118: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:09:28.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:09:27 vm00 ceph-mon[51670]: pgmap v118: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:09:28.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:09:27 vm08 ceph-mon[49535]: pgmap v118: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:09:29.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:09:29 vm00 ceph-mon[47364]: pgmap v119: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:09:29.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:09:29 vm00 ceph-mon[51670]: pgmap v119: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:09:30.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:09:29 vm08 ceph-mon[49535]: pgmap v119: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:09:31.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:09:30 vm00 ceph-mon[47364]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: 
dispatch 2026-03-10T13:09:31.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:09:30 vm00 ceph-mon[51670]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:09:31.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:09:30 vm08 ceph-mon[49535]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:09:32.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:09:31 vm00 ceph-mon[47364]: pgmap v120: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:09:32.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:09:31 vm00 ceph-mon[51670]: pgmap v120: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:09:32.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:09:31 vm08 ceph-mon[49535]: pgmap v120: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:09:33.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:09:32 vm00 ceph-mon[47364]: pgmap v121: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:09:33.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:09:32 vm00 ceph-mon[51670]: pgmap v121: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:09:33.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:09:32 vm08 ceph-mon[49535]: pgmap v121: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:09:33.503 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:09:33 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:09:33] "GET /metrics HTTP/1.1" 200 207612 "" "Prometheus/2.33.4" 2026-03-10T13:09:34.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:09:33 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=error ts=2026-03-10T13:09:33.509Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:09:34.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:09:33 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=error ts=2026-03-10T13:09:33.509Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:09:34.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:09:33 vm00 
ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:09:33.511Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:09:34.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:09:33 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:09:33.511Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:09:34.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:09:33 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:09:33.512Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:09:34.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:09:33 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:09:33.512Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:09:35.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:09:34 vm00 ceph-mon[47364]: pgmap v122: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:09:35.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:09:34 vm00 ceph-mon[51670]: pgmap v122: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:09:35.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:09:34 vm08 ceph-mon[49535]: pgmap v122: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:09:36.770 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:09:36 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[50837]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:09:36] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-10T13:09:37.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:09:36 vm00 ceph-mon[47364]: pgmap v123: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:09:37.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:09:36 vm00 ceph-mon[51670]: pgmap v123: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:09:37.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:09:36 vm08 ceph-mon[49535]: pgmap v123: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:09:39.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:09:38 vm00 ceph-mon[47364]: pgmap 
v124: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:09:39.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:09:38 vm00 ceph-mon[51670]: pgmap v124: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:09:39.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:09:38 vm08 ceph-mon[49535]: pgmap v124: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:09:40.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:09:39 vm00 ceph-mon[47364]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:09:40.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:09:39 vm00 ceph-mon[51670]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:09:40.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:09:39 vm08 ceph-mon[49535]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:09:41.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:09:40 vm00 ceph-mon[47364]: pgmap v125: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:09:41.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:09:40 vm00 ceph-mon[51670]: pgmap v125: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:09:41.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:09:40 vm08 ceph-mon[49535]: pgmap v125: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:09:43.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:09:42 vm00 ceph-mon[47364]: pgmap v126: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:09:43.253 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:09:43 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:09:43] "GET /metrics HTTP/1.1" 200 207612 "" "Prometheus/2.33.4" 2026-03-10T13:09:43.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:09:42 vm00 ceph-mon[51670]: pgmap v126: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:09:43.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:09:42 vm08 ceph-mon[49535]: pgmap v126: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:09:43.861 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:09:43 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=error ts=2026-03-10T13:09:43.510Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:09:43.862 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:09:43 vm00 
ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=error ts=2026-03-10T13:09:43.510Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:09:43.862 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:09:43 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:09:43.512Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:09:43.862 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:09:43 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:09:43.513Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:09:43.862 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:09:43 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:09:43.513Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:09:43.862 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:09:43 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:09:43.514Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:09:44.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:09:43 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T13:09:44.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:09:43 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T13:09:44.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:09:43 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T13:09:44.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:09:43 vm00 ceph-mon[47364]: from='mgr.24298 ' 
entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T13:09:44.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:09:43 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T13:09:44.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:09:43 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T13:09:44.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:09:43 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T13:09:44.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:09:43 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T13:09:44.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:09:43 vm08 ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T13:09:44.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:09:43 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T13:09:44.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:09:43 vm08 ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T13:09:44.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:09:43 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T13:09:45.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:09:44 vm00 ceph-mon[47364]: pgmap v127: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:09:45.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:09:44 vm00 ceph-mon[51670]: pgmap v127: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:09:45.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:09:44 vm08 ceph-mon[49535]: pgmap v127: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:09:46.770 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:09:46 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[50837]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:09:46] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-10T13:09:47.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:09:46 vm00 ceph-mon[47364]: pgmap v128: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:09:47.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:09:46 vm00 ceph-mon[51670]: pgmap v128: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:09:47.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:09:46 vm08 ceph-mon[49535]: pgmap v128: 161 pgs: 161 active+clean; 457 
KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:09:49.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:09:48 vm00 ceph-mon[47364]: pgmap v129: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:09:49.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:09:48 vm00 ceph-mon[51670]: pgmap v129: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:09:49.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:09:48 vm08 ceph-mon[49535]: pgmap v129: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:09:50.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:09:49 vm00 ceph-mon[47364]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:09:50.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:09:49 vm00 ceph-mon[51670]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:09:50.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:09:49 vm08 ceph-mon[49535]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:09:51.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:09:50 vm00 ceph-mon[47364]: pgmap v130: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:09:51.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:09:50 vm00 ceph-mon[51670]: pgmap v130: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:09:51.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:09:50 vm08 ceph-mon[49535]: pgmap v130: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:09:53.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:09:52 vm00 ceph-mon[47364]: pgmap v131: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:09:53.253 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:09:53 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:09:53] "GET /metrics HTTP/1.1" 200 207606 "" "Prometheus/2.33.4" 2026-03-10T13:09:53.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:09:52 vm00 ceph-mon[51670]: pgmap v131: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:09:53.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:09:52 vm08 ceph-mon[49535]: pgmap v131: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:09:54.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:09:53 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=error ts=2026-03-10T13:09:53.511Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot 
validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:09:54.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:09:53 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=error ts=2026-03-10T13:09:53.512Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:09:54.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:09:53 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:09:53.513Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:09:54.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:09:53 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:09:53.513Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:09:54.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:09:53 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:09:53.514Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:09:54.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:09:53 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:09:53.514Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:09:55.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:09:54 vm00 ceph-mon[47364]: pgmap v132: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:09:55.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:09:54 vm00 ceph-mon[51670]: pgmap v132: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:09:55.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:09:54 vm08 ceph-mon[49535]: pgmap v132: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:09:56.770 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:09:56 
vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[50837]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:09:56] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-10T13:09:57.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:09:56 vm00 ceph-mon[47364]: pgmap v133: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:09:57.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:09:56 vm00 ceph-mon[51670]: pgmap v133: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:09:57.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:09:56 vm08 ceph-mon[49535]: pgmap v133: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:09:59.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:09:58 vm00 ceph-mon[47364]: pgmap v134: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:09:59.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:09:58 vm00 ceph-mon[51670]: pgmap v134: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:09:59.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:09:58 vm08 ceph-mon[49535]: pgmap v134: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:10:00.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:09:59 vm00 ceph-mon[47364]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:10:00.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:09:59 vm00 ceph-mon[51670]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:10:00.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:09:59 vm08 ceph-mon[49535]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:10:01.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:10:00 vm00 ceph-mon[47364]: pgmap v135: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:10:01.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:10:00 vm00 ceph-mon[47364]: overall HEALTH_OK 2026-03-10T13:10:01.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:10:00 vm00 ceph-mon[51670]: pgmap v135: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:10:01.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:10:00 vm00 ceph-mon[51670]: overall HEALTH_OK 2026-03-10T13:10:01.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:10:00 vm08 ceph-mon[49535]: pgmap v135: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:10:01.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:10:00 vm08 ceph-mon[49535]: overall HEALTH_OK 2026-03-10T13:10:03.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:10:02 vm00 ceph-mon[47364]: pgmap v136: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:10:03.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:10:02 vm00 ceph-mon[51670]: pgmap v136: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:10:03.253 
INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:10:03 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:10:03] "GET /metrics HTTP/1.1" 200 207566 "" "Prometheus/2.33.4" 2026-03-10T13:10:03.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:10:02 vm08 ceph-mon[49535]: pgmap v136: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:10:04.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:10:03 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=error ts=2026-03-10T13:10:03.512Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:10:04.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:10:03 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=error ts=2026-03-10T13:10:03.512Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:10:04.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:10:03 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:10:03.514Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:10:04.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:10:03 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:10:03.514Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:10:04.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:10:03 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:10:03.515Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:10:04.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:10:03 vm00 
ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:10:03.515Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:10:05.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:10:04 vm00 ceph-mon[47364]: pgmap v137: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:10:05.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:10:04 vm00 ceph-mon[51670]: pgmap v137: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:10:05.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:10:04 vm08 ceph-mon[49535]: pgmap v137: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:10:06.770 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:10:06 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[50837]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:10:06] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-10T13:10:07.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:10:07 vm08 ceph-mon[49535]: pgmap v138: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:10:07.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:10:07 vm00 ceph-mon[47364]: pgmap v138: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:10:07.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:10:07 vm00 ceph-mon[51670]: pgmap v138: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:10:09.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:10:09 vm08 ceph-mon[49535]: pgmap v139: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:10:09.442 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:10:09 vm00 ceph-mon[47364]: pgmap v139: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:10:09.443 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:10:09 vm00 ceph-mon[51670]: pgmap v139: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:10:10.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:10:10 vm00 ceph-mon[47364]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:10:10.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:10:10 vm00 ceph-mon[51670]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:10:10.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:10:10 vm08 ceph-mon[49535]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:10:11.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:10:11 vm00 ceph-mon[47364]: pgmap v140: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:10:11.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:10:11 vm00 ceph-mon[51670]: pgmap 
v140: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:10:11.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:10:11 vm08 ceph-mon[49535]: pgmap v140: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:10:13.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:10:13 vm00 ceph-mon[47364]: pgmap v141: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:10:13.502 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:10:13 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:10:13] "GET /metrics HTTP/1.1" 200 207566 "" "Prometheus/2.33.4" 2026-03-10T13:10:13.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:10:13 vm00 ceph-mon[51670]: pgmap v141: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:10:13.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:10:13 vm08 ceph-mon[49535]: pgmap v141: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:10:14.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:10:13 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=error ts=2026-03-10T13:10:13.513Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:10:14.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:10:13 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:10:13.514Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:10:14.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:10:13 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=error ts=2026-03-10T13:10:13.514Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:10:14.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:10:13 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:10:13.514Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post 
\"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:10:14.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:10:13 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:10:13.516Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:10:14.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:10:13 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:10:13.517Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:10:14.382 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph config set mon mon_warn_on_insecure_global_id_reclaim false --force' 2026-03-10T13:10:14.939 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph config set mon mon_warn_on_insecure_global_id_reclaim_allowed false --force' 2026-03-10T13:10:15.139 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:10:15 vm00 ceph-mon[47364]: pgmap v142: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:10:15.139 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:10:15 vm00 ceph-mon[51670]: pgmap v142: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:10:15.474 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph config set global log_to_journald false --force' 2026-03-10T13:10:15.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:10:15 vm08 ceph-mon[49535]: pgmap v142: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:10:16.000 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch ps' 2026-03-10T13:10:16.461 INFO:teuthology.orchestra.run.vm00.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T13:10:16.461 INFO:teuthology.orchestra.run.vm00.stdout:alertmanager.a vm00 *:9093,9094 running (2m) 2m ago 3m 15.8M - ba2b418f427c da91a70a93ac 
2026-03-10T13:10:16.461 INFO:teuthology.orchestra.run.vm00.stdout:grafana.a vm08 *:3000 running (3m) 2m ago 3m 43.3M - 8.3.5 dad864ee21e9 cc6207fccfd0
2026-03-10T13:10:16.461 INFO:teuthology.orchestra.run.vm00.stdout:iscsi.foo.vm00.dezodo vm00 running (2m) 2m ago 2m 67.2M - 3.5 e1d6a67b021e 68c149a419b4
2026-03-10T13:10:16.461 INFO:teuthology.orchestra.run.vm00.stdout:mgr.x vm08 *:8443 running (4m) 2m ago 4m 417M - 17.2.0 e1d6a67b021e b15e662f34e1
2026-03-10T13:10:16.461 INFO:teuthology.orchestra.run.vm00.stdout:mgr.y vm00 *:9283 running (5m) 2m ago 5m 462M - 17.2.0 e1d6a67b021e b259475ee6d8
2026-03-10T13:10:16.461 INFO:teuthology.orchestra.run.vm00.stdout:mon.a vm00 running (5m) 2m ago 5m 53.6M 2048M 17.2.0 e1d6a67b021e f0e3f322471c
2026-03-10T13:10:16.461 INFO:teuthology.orchestra.run.vm00.stdout:mon.b vm08 running (5m) 2m ago 5m 46.5M 2048M 17.2.0 e1d6a67b021e d3c1458bc898
2026-03-10T13:10:16.461 INFO:teuthology.orchestra.run.vm00.stdout:mon.c vm00 running (5m) 2m ago 5m 50.6M 2048M 17.2.0 e1d6a67b021e d00b7fd44c23
2026-03-10T13:10:16.461 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.a vm00 *:9100 running (3m) 2m ago 3m 12.2M - 1dbe0e931976 439a263972f0
2026-03-10T13:10:16.461 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.b vm08 *:9100 running (3m) 2m ago 3m 15.4M - 1dbe0e931976 d5ba7ccd220b
2026-03-10T13:10:16.461 INFO:teuthology.orchestra.run.vm00.stdout:osd.0 vm00 running (4m) 2m ago 4m 46.2M 4096M 17.2.0 e1d6a67b021e 2919c7073fa7
2026-03-10T13:10:16.461 INFO:teuthology.orchestra.run.vm00.stdout:osd.1 vm00 running (4m) 2m ago 4m 50.5M 4096M 17.2.0 e1d6a67b021e 647927dc41ea
2026-03-10T13:10:16.461 INFO:teuthology.orchestra.run.vm00.stdout:osd.2 vm00 running (4m) 2m ago 4m 48.8M 4096M 17.2.0 e1d6a67b021e 1e417e82c2b9
2026-03-10T13:10:16.461 INFO:teuthology.orchestra.run.vm00.stdout:osd.3 vm00 running (4m) 2m ago 4m 45.5M 4096M 17.2.0 e1d6a67b021e 98bb6d7ea69f
2026-03-10T13:10:16.461 INFO:teuthology.orchestra.run.vm00.stdout:osd.4 vm08 running (4m) 2m ago 4m 46.9M 4096M 17.2.0 e1d6a67b021e e349440ca776
2026-03-10T13:10:16.461 INFO:teuthology.orchestra.run.vm00.stdout:osd.5 vm08 running (4m) 2m ago 4m 48.9M 4096M 17.2.0 e1d6a67b021e 9a6a2a67cc99
2026-03-10T13:10:16.461 INFO:teuthology.orchestra.run.vm00.stdout:osd.6 vm08 running (3m) 2m ago 3m 44.6M 4096M 17.2.0 e1d6a67b021e c27676916d52
2026-03-10T13:10:16.461 INFO:teuthology.orchestra.run.vm00.stdout:osd.7 vm08 running (3m) 2m ago 3m 46.5M 4096M 17.2.0 e1d6a67b021e bf67951990a5
2026-03-10T13:10:16.461 INFO:teuthology.orchestra.run.vm00.stdout:prometheus.a vm08 *:9095 running (2m) 2m ago 3m 36.5M - 514e6a882f6e 0a921c94fbae
2026-03-10T13:10:16.461 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm00.tvlvzo vm00 *:8000 running (3m) 2m ago 3m 90.1M - 17.2.0 e1d6a67b021e be24eac16807
2026-03-10T13:10:16.461 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm08.ljayps vm08 *:8000 running (3m) 2m ago 3m 87.5M - 17.2.0 e1d6a67b021e 67b525427823
2026-03-10T13:10:16.519 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph versions'
2026-03-10T13:10:16.770 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:10:16 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[50837]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:10:16] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4"
2026-03-10T13:10:17.036 INFO:teuthology.orchestra.run.vm00.stdout:{
2026-03-10T13:10:17.036 INFO:teuthology.orchestra.run.vm00.stdout: "mon": {
2026-03-10T13:10:17.036 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 3
2026-03-10T13:10:17.036 INFO:teuthology.orchestra.run.vm00.stdout: },
2026-03-10T13:10:17.036 INFO:teuthology.orchestra.run.vm00.stdout: "mgr": {
2026-03-10T13:10:17.036 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 2
2026-03-10T13:10:17.036 INFO:teuthology.orchestra.run.vm00.stdout: },
2026-03-10T13:10:17.036 INFO:teuthology.orchestra.run.vm00.stdout: "osd": {
2026-03-10T13:10:17.036 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 8
2026-03-10T13:10:17.036 INFO:teuthology.orchestra.run.vm00.stdout: },
2026-03-10T13:10:17.036 INFO:teuthology.orchestra.run.vm00.stdout: "mds": {},
2026-03-10T13:10:17.036 INFO:teuthology.orchestra.run.vm00.stdout: "rgw": {
2026-03-10T13:10:17.036 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 2
2026-03-10T13:10:17.036 INFO:teuthology.orchestra.run.vm00.stdout: },
2026-03-10T13:10:17.036 INFO:teuthology.orchestra.run.vm00.stdout: "overall": {
2026-03-10T13:10:17.036 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 15
2026-03-10T13:10:17.036 INFO:teuthology.orchestra.run.vm00.stdout: }
2026-03-10T13:10:17.036 INFO:teuthology.orchestra.run.vm00.stdout:}
2026-03-10T13:10:17.091 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph -s'
2026-03-10T13:10:17.302 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:10:17 vm00 ceph-mon[47364]: pgmap v143: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T13:10:17.302 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:10:17 vm00 ceph-mon[47364]: from='client.24706 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T13:10:17.302 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:10:17 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/2090381558' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T13:10:17.302 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:10:17 vm00 ceph-mon[51670]: pgmap v143: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T13:10:17.302 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:10:17 vm00 ceph-mon[51670]: from='client.24706 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T13:10:17.302 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:10:17 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/2090381558' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T13:10:17.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:10:17 vm08 ceph-mon[49535]: pgmap v143: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T13:10:17.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:10:17 vm08 ceph-mon[49535]: from='client.24706 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T13:10:17.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:10:17 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/2090381558' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T13:10:17.597 INFO:teuthology.orchestra.run.vm00.stdout: cluster:
2026-03-10T13:10:17.597 INFO:teuthology.orchestra.run.vm00.stdout: id: 98a3dada-1c81-11f1-89c9-d57c120f78d5
2026-03-10T13:10:17.597 INFO:teuthology.orchestra.run.vm00.stdout: health: HEALTH_OK
2026-03-10T13:10:17.597 INFO:teuthology.orchestra.run.vm00.stdout:
2026-03-10T13:10:17.597 INFO:teuthology.orchestra.run.vm00.stdout: services:
2026-03-10T13:10:17.597 INFO:teuthology.orchestra.run.vm00.stdout: mon: 3 daemons, quorum a,c,b (age 5m)
2026-03-10T13:10:17.597 INFO:teuthology.orchestra.run.vm00.stdout: mgr: y(active, since 3m), standbys: x
2026-03-10T13:10:17.597 INFO:teuthology.orchestra.run.vm00.stdout: osd: 8 osds: 8 up (since 3m), 8 in (since 3m)
2026-03-10T13:10:17.597 INFO:teuthology.orchestra.run.vm00.stdout: rgw: 2 daemons active (2 hosts, 1 zones)
2026-03-10T13:10:17.597 INFO:teuthology.orchestra.run.vm00.stdout:
2026-03-10T13:10:17.597 INFO:teuthology.orchestra.run.vm00.stdout: data:
2026-03-10T13:10:17.597 INFO:teuthology.orchestra.run.vm00.stdout: pools: 6 pools, 161 pgs
2026-03-10T13:10:17.597 INFO:teuthology.orchestra.run.vm00.stdout: objects: 209 objects, 457 KiB
2026-03-10T13:10:17.597 INFO:teuthology.orchestra.run.vm00.stdout: usage: 71 MiB used, 160 GiB / 160 GiB avail
2026-03-10T13:10:17.598 INFO:teuthology.orchestra.run.vm00.stdout: pgs: 161 active+clean
2026-03-10T13:10:17.598 INFO:teuthology.orchestra.run.vm00.stdout:
2026-03-10T13:10:17.598 INFO:teuthology.orchestra.run.vm00.stdout: io:
2026-03-10T13:10:17.598 INFO:teuthology.orchestra.run.vm00.stdout: client: 1.2 KiB/s rd, 1 op/s rd, 0 op/s wr
2026-03-10T13:10:17.598 INFO:teuthology.orchestra.run.vm00.stdout:
2026-03-10T13:10:17.666 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch ls'
2026-03-10T13:10:18.169 INFO:teuthology.orchestra.run.vm00.stdout:NAME PORTS RUNNING REFRESHED AGE PLACEMENT
2026-03-10T13:10:18.169 INFO:teuthology.orchestra.run.vm00.stdout:alertmanager ?:9093,9094 1/1 2m ago 3m vm00=a;count:1
2026-03-10T13:10:18.169 INFO:teuthology.orchestra.run.vm00.stdout:grafana ?:3000 1/1 2m ago 3m vm08=a;count:1
2026-03-10T13:10:18.169 INFO:teuthology.orchestra.run.vm00.stdout:iscsi.foo 1/1 2m ago 3m count:1
2026-03-10T13:10:18.169 INFO:teuthology.orchestra.run.vm00.stdout:mgr 2/2 2m ago 5m vm00=y;vm08=x;count:2
2026-03-10T13:10:18.169 INFO:teuthology.orchestra.run.vm00.stdout:mon 3/3 2m ago 5m vm00:192.168.123.100=a;vm00:[v2:192.168.123.100:3301,v1:192.168.123.100:6790]=c;vm08:192.168.123.108=b;count:3
2026-03-10T13:10:18.169 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter ?:9100 2/2 2m ago 3m vm00=a;vm08=b;count:2
2026-03-10T13:10:18.169 INFO:teuthology.orchestra.run.vm00.stdout:osd 8 2m ago -
2026-03-10T13:10:18.169 INFO:teuthology.orchestra.run.vm00.stdout:prometheus ?:9095 1/1 2m ago 3m vm08=a;count:1
2026-03-10T13:10:18.169 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo ?:8000 2/2 2m ago 3m count:2
2026-03-10T13:10:18.170 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:10:18 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/925683947' entity='client.admin' cmd=[{"prefix": "status"}]: dispatch
2026-03-10T13:10:18.170 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:10:18 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/925683947' entity='client.admin' cmd=[{"prefix": "status"}]: dispatch
2026-03-10T13:10:18.218 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch daemon redeploy "mgr.$(ceph mgr dump -f json | jq .standbys | jq .[] | jq -r .name)" --image quay.ceph.io/ceph-ci/ceph:$sha1'
2026-03-10T13:10:18.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:10:18 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/925683947' entity='client.admin' cmd=[{"prefix": "status"}]: dispatch
2026-03-10T13:10:18.905 INFO:teuthology.orchestra.run.vm00.stdout:Scheduled to redeploy mgr.x on host 'vm08'
2026-03-10T13:10:18.967 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch ps --refresh'
2026-03-10T13:10:19.174 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:10:19 vm00 ceph-mon[47364]: pgmap v144: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T13:10:19.174 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:10:19 vm00 ceph-mon[47364]: from='client.24724 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T13:10:19.174 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:10:19 vm00 ceph-mon[47364]: from='client.? 
192.168.123.100:0/3255672808' entity='client.admin' cmd=[{"prefix": "mgr dump", "format": "json"}]: dispatch 2026-03-10T13:10:19.174 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:10:19 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:10:19.174 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:10:19 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:10:19.174 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:10:19 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:10:19.174 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:10:19 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:10:19.174 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:10:19 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-10T13:10:19.174 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:10:19 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-10T13:10:19.174 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:10:19 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-10T13:10:19.174 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:10:19 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:10:19.174 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:10:19 vm00 ceph-mon[51670]: pgmap v144: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:10:19.174 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:10:19 vm00 ceph-mon[51670]: from='client.24724 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:10:19.174 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:10:19 vm00 ceph-mon[51670]: from='client.? 
192.168.123.100:0/3255672808' entity='client.admin' cmd=[{"prefix": "mgr dump", "format": "json"}]: dispatch 2026-03-10T13:10:19.174 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:10:19 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:10:19.174 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:10:19 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:10:19.174 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:10:19 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:10:19.174 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:10:19 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:10:19.174 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:10:19 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-10T13:10:19.174 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:10:19 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-10T13:10:19.174 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:10:19 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-10T13:10:19.174 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:10:19 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:10:19.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:10:19 vm08 ceph-mon[49535]: pgmap v144: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:10:19.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:10:19 vm08 ceph-mon[49535]: from='client.24724 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:10:19.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:10:19 vm08 ceph-mon[49535]: from='client.? 
192.168.123.100:0/3255672808' entity='client.admin' cmd=[{"prefix": "mgr dump", "format": "json"}]: dispatch 2026-03-10T13:10:19.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:10:19 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:10:19.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:10:19 vm08 ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:10:19.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:10:19 vm08 ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:10:19.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:10:19 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:10:19.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:10:19 vm08 ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-10T13:10:19.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:10:19 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-10T13:10:19.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:10:19 vm08 ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-10T13:10:19.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:10:19 vm08 ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:10:19.469 INFO:teuthology.orchestra.run.vm00.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T13:10:19.470 INFO:teuthology.orchestra.run.vm00.stdout:alertmanager.a vm00 *:9093,9094 running (2m) 2m ago 3m 15.8M - ba2b418f427c da91a70a93ac 2026-03-10T13:10:19.470 INFO:teuthology.orchestra.run.vm00.stdout:grafana.a vm08 *:3000 running (3m) 2m ago 3m 43.3M - 8.3.5 dad864ee21e9 cc6207fccfd0 2026-03-10T13:10:19.470 INFO:teuthology.orchestra.run.vm00.stdout:iscsi.foo.vm00.dezodo vm00 running (3m) 2m ago 3m 67.2M - 3.5 e1d6a67b021e 68c149a419b4 2026-03-10T13:10:19.470 INFO:teuthology.orchestra.run.vm00.stdout:mgr.x vm08 *:8443 running (5m) 2m ago 5m 417M - 17.2.0 e1d6a67b021e b15e662f34e1 2026-03-10T13:10:19.470 INFO:teuthology.orchestra.run.vm00.stdout:mgr.y vm00 *:9283 running (5m) 2m ago 5m 462M - 17.2.0 e1d6a67b021e b259475ee6d8 2026-03-10T13:10:19.470 INFO:teuthology.orchestra.run.vm00.stdout:mon.a vm00 running (5m) 2m ago 5m 53.6M 2048M 17.2.0 e1d6a67b021e f0e3f322471c 2026-03-10T13:10:19.470 INFO:teuthology.orchestra.run.vm00.stdout:mon.b vm08 running (5m) 2m ago 5m 46.5M 2048M 17.2.0 e1d6a67b021e d3c1458bc898 2026-03-10T13:10:19.470 INFO:teuthology.orchestra.run.vm00.stdout:mon.c vm00 running (5m) 2m ago 5m 50.6M 2048M 17.2.0 e1d6a67b021e d00b7fd44c23 2026-03-10T13:10:19.470 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.a vm00 *:9100 running (3m) 2m ago 3m 12.2M - 1dbe0e931976 439a263972f0 2026-03-10T13:10:19.470 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.b vm08 *:9100 running (3m) 2m ago 3m 15.4M - 1dbe0e931976 d5ba7ccd220b 2026-03-10T13:10:19.470 INFO:teuthology.orchestra.run.vm00.stdout:osd.0 vm00 running (4m) 2m 
ago 4m 46.2M 4096M 17.2.0 e1d6a67b021e 2919c7073fa7 2026-03-10T13:10:19.470 INFO:teuthology.orchestra.run.vm00.stdout:osd.1 vm00 running (4m) 2m ago 4m 50.5M 4096M 17.2.0 e1d6a67b021e 647927dc41ea 2026-03-10T13:10:19.470 INFO:teuthology.orchestra.run.vm00.stdout:osd.2 vm00 running (4m) 2m ago 4m 48.8M 4096M 17.2.0 e1d6a67b021e 1e417e82c2b9 2026-03-10T13:10:19.470 INFO:teuthology.orchestra.run.vm00.stdout:osd.3 vm00 running (4m) 2m ago 4m 45.5M 4096M 17.2.0 e1d6a67b021e 98bb6d7ea69f 2026-03-10T13:10:19.470 INFO:teuthology.orchestra.run.vm00.stdout:osd.4 vm08 running (4m) 2m ago 4m 46.9M 4096M 17.2.0 e1d6a67b021e e349440ca776 2026-03-10T13:10:19.470 INFO:teuthology.orchestra.run.vm00.stdout:osd.5 vm08 running (4m) 2m ago 4m 48.9M 4096M 17.2.0 e1d6a67b021e 9a6a2a67cc99 2026-03-10T13:10:19.470 INFO:teuthology.orchestra.run.vm00.stdout:osd.6 vm08 running (3m) 2m ago 3m 44.6M 4096M 17.2.0 e1d6a67b021e c27676916d52 2026-03-10T13:10:19.470 INFO:teuthology.orchestra.run.vm00.stdout:osd.7 vm08 running (3m) 2m ago 3m 46.5M 4096M 17.2.0 e1d6a67b021e bf67951990a5 2026-03-10T13:10:19.470 INFO:teuthology.orchestra.run.vm00.stdout:prometheus.a vm08 *:9095 running (2m) 2m ago 3m 36.5M - 514e6a882f6e 0a921c94fbae 2026-03-10T13:10:19.470 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm00.tvlvzo vm00 *:8000 running (3m) 2m ago 3m 90.1M - 17.2.0 e1d6a67b021e be24eac16807 2026-03-10T13:10:19.470 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm08.ljayps vm08 *:8000 running (3m) 2m ago 3m 87.5M - 17.2.0 e1d6a67b021e 67b525427823 2026-03-10T13:10:19.533 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'sleep 180' 2026-03-10T13:10:20.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:10:20 vm00 ceph-mon[47364]: from='client.24733 -' entity='client.admin' cmd=[{"prefix": "orch daemon redeploy", "name": "mgr.x", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:10:20.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:10:20 vm00 ceph-mon[47364]: Schedule redeploy daemon mgr.x 2026-03-10T13:10:20.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:10:20 vm00 ceph-mon[47364]: Deploying daemon mgr.x on vm08 2026-03-10T13:10:20.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:10:20 vm00 ceph-mon[47364]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:10:20.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:10:20 vm00 ceph-mon[47364]: from='client.14901 -' entity='client.admin' cmd=[{"prefix": "orch ps", "refresh": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:10:20.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:10:20 vm00 ceph-mon[51670]: from='client.24733 -' entity='client.admin' cmd=[{"prefix": "orch daemon redeploy", "name": "mgr.x", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:10:20.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:10:20 vm00 ceph-mon[51670]: Schedule redeploy daemon mgr.x 2026-03-10T13:10:20.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:10:20 vm00 ceph-mon[51670]: Deploying daemon mgr.x on vm08 2026-03-10T13:10:20.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:10:20 vm00 
ceph-mon[51670]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:10:20.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:10:20 vm00 ceph-mon[51670]: from='client.14901 -' entity='client.admin' cmd=[{"prefix": "orch ps", "refresh": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:10:20.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:10:20 vm08 ceph-mon[49535]: from='client.24733 -' entity='client.admin' cmd=[{"prefix": "orch daemon redeploy", "name": "mgr.x", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:10:20.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:10:20 vm08 ceph-mon[49535]: Schedule redeploy daemon mgr.x 2026-03-10T13:10:20.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:10:20 vm08 ceph-mon[49535]: Deploying daemon mgr.x on vm08 2026-03-10T13:10:20.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:10:20 vm08 ceph-mon[49535]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:10:20.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:10:20 vm08 ceph-mon[49535]: from='client.14901 -' entity='client.admin' cmd=[{"prefix": "orch ps", "refresh": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:10:21.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:10:21 vm00 ceph-mon[47364]: pgmap v145: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:10:21.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:10:21 vm00 ceph-mon[51670]: pgmap v145: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:10:21.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:10:21 vm08 ceph-mon[49535]: pgmap v145: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:10:23.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:10:23 vm00 ceph-mon[47364]: pgmap v146: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:10:23.503 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:10:23 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:10:23] "GET /metrics HTTP/1.1" 200 207566 "" "Prometheus/2.33.4" 2026-03-10T13:10:23.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:10:23 vm00 ceph-mon[51670]: pgmap v146: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:10:23.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:10:23 vm08 ceph-mon[49535]: pgmap v146: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:10:24.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:10:23 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=error ts=2026-03-10T13:10:23.514Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post 
\"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:10:24.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:10:23 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=error ts=2026-03-10T13:10:23.515Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:10:24.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:10:23 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:10:23.515Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:10:24.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:10:23 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:10:23.516Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:10:24.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:10:23 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:10:23.517Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:10:24.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:10:23 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:10:23.517Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:10:25.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:10:25 vm00 ceph-mon[47364]: pgmap v147: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:10:25.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:10:25 vm00 ceph-mon[51670]: pgmap v147: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:10:25.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:10:25 vm08 ceph-mon[49535]: pgmap v147: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 
2026-03-10T13:10:26.770 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:10:26 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[50837]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:10:26] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-10T13:10:27.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:10:27 vm00 ceph-mon[47364]: pgmap v148: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:10:27.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:10:27 vm00 ceph-mon[51670]: pgmap v148: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:10:27.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:10:27 vm08 ceph-mon[49535]: pgmap v148: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:10:29.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:10:29 vm00 ceph-mon[47364]: pgmap v149: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:10:29.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:10:29 vm00 ceph-mon[51670]: pgmap v149: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:10:29.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:10:29 vm08 ceph-mon[49535]: pgmap v149: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:10:30.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:10:30 vm00 ceph-mon[47364]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:10:30.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:10:30 vm00 ceph-mon[51670]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:10:30.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:10:30 vm08 ceph-mon[49535]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:10:32.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:10:31 vm00 ceph-mon[47364]: pgmap v150: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:10:32.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:10:31 vm00 ceph-mon[51670]: pgmap v150: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:10:32.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:10:31 vm08 ceph-mon[49535]: pgmap v150: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:10:33.503 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:10:33 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:10:33] "GET /metrics HTTP/1.1" 200 207565 "" "Prometheus/2.33.4" 2026-03-10T13:10:34.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:10:33 vm00 ceph-mon[47364]: pgmap v151: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:10:34.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:10:33 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=error ts=2026-03-10T13:10:33.515Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts 
failed" num_alerts=4 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:10:34.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:10:33 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=error ts=2026-03-10T13:10:33.516Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:10:34.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:10:33 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:10:33.517Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:10:34.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:10:33 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:10:33.517Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:10:34.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:10:33 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:10:33.517Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:10:34.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:10:33 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:10:33.518Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:10:34.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:10:33 vm00 ceph-mon[51670]: pgmap v151: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:10:34.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:10:33 vm08 ceph-mon[49535]: pgmap 
v151: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:10:36.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:10:35 vm00 ceph-mon[47364]: pgmap v152: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:10:36.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:10:35 vm00 ceph-mon[51670]: pgmap v152: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:10:36.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:10:35 vm08 ceph-mon[49535]: pgmap v152: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:10:36.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:10:36 vm08 ceph-mon[49535]: pgmap v153: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:10:36.770 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:10:36 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[50837]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:10:36] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-10T13:10:37.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:10:36 vm00 ceph-mon[47364]: pgmap v153: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:10:37.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:10:36 vm00 ceph-mon[51670]: pgmap v153: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:10:39.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:10:38 vm00 ceph-mon[47364]: pgmap v154: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:10:39.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:10:38 vm00 ceph-mon[51670]: pgmap v154: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:10:39.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:10:38 vm08 ceph-mon[49535]: pgmap v154: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:10:40.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:10:39 vm00 ceph-mon[47364]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:10:40.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:10:39 vm00 ceph-mon[51670]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:10:40.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:10:39 vm08 ceph-mon[49535]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:10:41.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:10:40 vm00 ceph-mon[47364]: pgmap v155: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:10:41.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:10:40 vm00 ceph-mon[51670]: pgmap v155: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:10:41.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:10:40 vm08 ceph-mon[49535]: pgmap v155: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s 
rd, 1 op/s 2026-03-10T13:10:43.483 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:10:43 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:10:43] "GET /metrics HTTP/1.1" 200 207565 "" "Prometheus/2.33.4" 2026-03-10T13:10:43.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:10:43 vm00 ceph-mon[47364]: pgmap v156: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 852 B/s rd, 0 op/s 2026-03-10T13:10:43.753 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:10:43 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=error ts=2026-03-10T13:10:43.516Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:10:43.753 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:10:43 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=error ts=2026-03-10T13:10:43.516Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:10:43.753 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:10:43 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:10:43.517Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:10:43.753 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:10:43 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:10:43.518Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:10:43.753 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:10:43 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:10:43.518Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:10:43.753 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:10:43 
vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:10:43.519Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:10:43.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:10:43 vm00 ceph-mon[51670]: pgmap v156: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 852 B/s rd, 0 op/s 2026-03-10T13:10:43.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:10:43 vm08 ceph-mon[49535]: pgmap v156: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 852 B/s rd, 0 op/s 2026-03-10T13:10:45.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:10:44 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T13:10:45.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:10:44 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T13:10:45.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:10:44 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T13:10:45.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:10:44 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T13:10:45.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:10:44 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T13:10:45.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:10:44 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T13:10:45.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:10:44 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T13:10:45.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:10:44 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T13:10:45.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:10:44 vm08 ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T13:10:45.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:10:44 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T13:10:45.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:10:44 vm08 ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix":"config 
rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T13:10:45.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:10:44 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T13:10:46.480 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:10:46 vm08 ceph-mon[49535]: pgmap v157: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 852 B/s rd, 0 op/s 2026-03-10T13:10:46.480 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:10:46 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[50837]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:10:46] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-10T13:10:46.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:10:46 vm00 ceph-mon[47364]: pgmap v157: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 852 B/s rd, 0 op/s 2026-03-10T13:10:46.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:10:46 vm00 ceph-mon[51670]: pgmap v157: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 852 B/s rd, 0 op/s 2026-03-10T13:10:47.195 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:10:47 vm08 ceph-mon[49535]: pgmap v158: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:10:47.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:10:47 vm00 ceph-mon[47364]: pgmap v158: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:10:47.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:10:47 vm00 ceph-mon[51670]: pgmap v158: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:10:47.521 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:10:47 vm08 systemd[1]: Stopping Ceph mgr.x for 98a3dada-1c81-11f1-89c9-d57c120f78d5... 2026-03-10T13:10:47.521 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:10:47 vm08 bash[67917]: Error: no container with name or ID "ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr.x" found: no such container 2026-03-10T13:10:47.521 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:10:47 vm08 podman[67925]: 2026-03-10 13:10:47.291613889 +0000 UTC m=+0.058717453 container died b15e662f34e160f41224209b46471e84f25e053744e7f95d3513a83dfd9d6adc (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x, build-date=2022-05-03T08:36:31.336870, ceph=True, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, io.openshift.tags=base centos centos-stream, GIT_CLEAN=True, GIT_REPO=https://github.com/ceph/ceph-container.git, name=centos-stream, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, io.openshift.expose-services=, maintainer=Guillaume Abrioux , distribution-scope=public, vcs-type=git, RELEASE=HEAD, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, vendor=Red Hat, Inc., version=8, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. 
This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., CEPH_POINT_RELEASE=-17.2.0, GIT_BRANCH=HEAD, architecture=x86_64, io.buildah.version=1.19.8, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, com.redhat.component=centos-stream-container, release=754, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, io.k8s.display-name=CentOS Stream 8, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb) 2026-03-10T13:10:47.521 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:10:47 vm08 podman[67925]: 2026-03-10 13:10:47.317066738 +0000 UTC m=+0.084170312 container remove b15e662f34e160f41224209b46471e84f25e053744e7f95d3513a83dfd9d6adc (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x, io.k8s.display-name=CentOS Stream 8, build-date=2022-05-03T08:36:31.336870, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, architecture=x86_64, vendor=Red Hat, Inc., release=754, version=8, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, RELEASE=HEAD, ceph=True, io.buildah.version=1.19.8, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, name=centos-stream, GIT_BRANCH=HEAD, io.openshift.expose-services=, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, maintainer=Guillaume Abrioux , GIT_REPO=https://github.com/ceph/ceph-container.git, distribution-scope=public, io.openshift.tags=base centos centos-stream, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, CEPH_POINT_RELEASE=-17.2.0, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., GIT_CLEAN=True, vcs-type=git, com.redhat.component=centos-stream-container) 2026-03-10T13:10:47.521 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:10:47 vm08 bash[67925]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x 2026-03-10T13:10:47.521 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:10:47 vm08 systemd[1]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@mgr.x.service: Main process exited, code=exited, status=143/n/a 2026-03-10T13:10:47.521 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:10:47 vm08 bash[67944]: Error: no container with name or ID "ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr.x" found: no such container 2026-03-10T13:10:47.521 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:10:47 vm08 systemd[1]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@mgr.x.service: Failed with result 'exit-code'. 2026-03-10T13:10:47.521 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:10:47 vm08 systemd[1]: Stopped Ceph mgr.x for 98a3dada-1c81-11f1-89c9-d57c120f78d5. 
2026-03-10T13:10:47.521 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:10:47 vm08 systemd[1]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@mgr.x.service: Consumed 11.498s CPU time. 2026-03-10T13:10:47.805 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:10:47 vm08 systemd[1]: Starting Ceph mgr.x for 98a3dada-1c81-11f1-89c9-d57c120f78d5... 2026-03-10T13:10:47.805 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:10:47 vm08 podman[68026]: 2026-03-10 13:10:47.636801127 +0000 UTC m=+0.018886268 container create 62b908c184a8253b0ba3d99c421442a9afa370f6aa242dd221137d96d770e057 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default) 2026-03-10T13:10:47.805 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:10:47 vm08 podman[68026]: 2026-03-10 13:10:47.676675754 +0000 UTC m=+0.058760884 container init 62b908c184a8253b0ba3d99c421442a9afa370f6aa242dd221137d96d770e057 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, org.label-schema.build-date=20260223, io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, ceph=True, org.label-schema.vendor=CentOS, CEPH_REF=squid) 2026-03-10T13:10:47.805 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:10:47 vm08 podman[68026]: 2026-03-10 13:10:47.679710956 +0000 UTC m=+0.061796086 container start 62b908c184a8253b0ba3d99c421442a9afa370f6aa242dd221137d96d770e057 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, org.label-schema.license=GPLv2, org.label-schema.build-date=20260223, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.41.3, OSD_FLAVOR=default, ceph=True) 2026-03-10T13:10:47.805 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:10:47 vm08 bash[68026]: 62b908c184a8253b0ba3d99c421442a9afa370f6aa242dd221137d96d770e057 2026-03-10T13:10:47.805 
INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:10:47 vm08 podman[68026]: 2026-03-10 13:10:47.629509286 +0000 UTC m=+0.011594437 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:10:47.805 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:10:47 vm08 systemd[1]: Started Ceph mgr.x for 98a3dada-1c81-11f1-89c9-d57c120f78d5. 2026-03-10T13:10:47.805 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:10:47 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: 2026-03-10T13:10:47.801+0000 7f0c4be2e140 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member 2026-03-10T13:10:48.098 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:10:47 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: 2026-03-10T13:10:47.848+0000 7f0c4be2e140 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member 2026-03-10T13:10:48.354 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:10:48 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: 2026-03-10T13:10:48.327+0000 7f0c4be2e140 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-10T13:10:48.717 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:10:48 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: 2026-03-10T13:10:48.714+0000 7f0c4be2e140 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member 2026-03-10T13:10:48.727 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:10:48 vm00 ceph-mon[51670]: pgmap v159: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 852 B/s rd, 0 op/s 2026-03-10T13:10:48.727 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:10:48 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:10:48.727 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:10:48 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:10:48.727 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:10:48 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:10:48.727 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:10:48 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:10:48.730 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:10:48 vm00 ceph-mon[47364]: pgmap v159: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 852 B/s rd, 0 op/s 2026-03-10T13:10:48.730 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:10:48 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:10:48.730 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:10:48 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:10:48.730 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:10:48 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:10:48.730 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:10:48 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:10:48.973 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:10:48 vm08 ceph-mon[49535]: pgmap v159: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 852 B/s rd, 0 op/s 2026-03-10T13:10:48.973 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:10:48 vm08 
ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:10:48.973 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:10:48 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:10:48.973 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:10:48 vm08 ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:10:48.973 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:10:48 vm08 ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:10:48.973 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:10:48 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: /lib64/python3.9/site-packages/scipy/__init__.py:73: UserWarning: NumPy was imported from a Python sub-interpreter but NumPy does not properly support sub-interpreters. This will likely work for most users but might cause hard to track down issues or subtle bugs. A common user of the rare sub-interpreter feature is wsgi which also allows single-interpreter mode. 2026-03-10T13:10:48.973 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:10:48 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: Improvements in the case of bugs are welcome, but is not on the NumPy roadmap, and full support may require significant effort to achieve. 2026-03-10T13:10:48.973 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:10:48 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: from numpy import show_config as show_numpy_config 2026-03-10T13:10:48.973 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:10:48 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: 2026-03-10T13:10:48.823+0000 7f0c4be2e140 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member 2026-03-10T13:10:48.973 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:10:48 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: 2026-03-10T13:10:48.871+0000 7f0c4be2e140 -1 mgr[py] Module influx has missing NOTIFY_TYPES member 2026-03-10T13:10:48.973 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:10:48 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: 2026-03-10T13:10:48.968+0000 7f0c4be2e140 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member 2026-03-10T13:10:49.752 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:10:49 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:10:49.561Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=6 err="Post \"https://192.168.123.108:8443/api/prometheus_receiver\": dial tcp 192.168.123.108:8443: connect: connection refused" 2026-03-10T13:10:49.815 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:10:49 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: 2026-03-10T13:10:49.545+0000 7f0c4be2e140 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-10T13:10:49.815 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:10:49 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: 2026-03-10T13:10:49.680+0000 7f0c4be2e140 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-10T13:10:49.815 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:10:49 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: 2026-03-10T13:10:49.730+0000 7f0c4be2e140 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 
2026-03-10T13:10:49.815 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:10:49 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: 2026-03-10T13:10:49.769+0000 7f0c4be2e140 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-10T13:10:50.094 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:10:49 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: 2026-03-10T13:10:49.812+0000 7f0c4be2e140 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 2026-03-10T13:10:50.095 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:10:49 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: 2026-03-10T13:10:49.853+0000 7f0c4be2e140 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-10T13:10:50.095 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:10:50 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: 2026-03-10T13:10:50.036+0000 7f0c4be2e140 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member 2026-03-10T13:10:50.095 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:10:49 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:10:50.095 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:10:49 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:10:50.095 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:10:49 vm08 ceph-mon[49535]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:10:50.095 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:10:49 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:10:50.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:10:49 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:10:50.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:10:49 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:10:50.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:10:49 vm00 ceph-mon[51670]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:10:50.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:10:49 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:10:50.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:10:49 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:10:50.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:10:49 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:10:50.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:10:49 vm00 ceph-mon[47364]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:10:50.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:10:49 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:10:50.520 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:10:50 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: 2026-03-10T13:10:50.092+0000 7f0c4be2e140 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-10T13:10:50.520 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:10:50 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: 2026-03-10T13:10:50.322+0000 7f0c4be2e140 -1 mgr[py] Module rgw has missing NOTIFY_TYPES member 2026-03-10T13:10:50.752 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:10:50 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:10:50.392Z caller=notify.go:724 
component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=6 err="Post \"https://192.168.123.108:8443/api/prometheus_receiver\": dial tcp 192.168.123.108:8443: connect: connection refused" 2026-03-10T13:10:50.896 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:10:50 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: 2026-03-10T13:10:50.617+0000 7f0c4be2e140 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-10T13:10:50.896 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:10:50 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: 2026-03-10T13:10:50.654+0000 7f0c4be2e140 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member 2026-03-10T13:10:50.896 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:10:50 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: 2026-03-10T13:10:50.696+0000 7f0c4be2e140 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-10T13:10:50.896 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:10:50 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: 2026-03-10T13:10:50.778+0000 7f0c4be2e140 -1 mgr[py] Module status has missing NOTIFY_TYPES member 2026-03-10T13:10:50.896 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:10:50 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: 2026-03-10T13:10:50.814+0000 7f0c4be2e140 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 2026-03-10T13:10:51.182 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:10:50 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: 2026-03-10T13:10:50.894+0000 7f0c4be2e140 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member 2026-03-10T13:10:51.182 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:10:51 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: 2026-03-10T13:10:51.006+0000 7f0c4be2e140 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member 2026-03-10T13:10:51.182 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:10:51 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: 2026-03-10T13:10:51.143+0000 7f0c4be2e140 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-10T13:10:51.520 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:10:51 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: 2026-03-10T13:10:51.179+0000 7f0c4be2e140 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-10T13:10:51.520 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:10:51 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: [10/Mar/2026:13:10:51] ENGINE Bus STARTING 2026-03-10T13:10:51.520 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:10:51 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: CherryPy Checker: 2026-03-10T13:10:51.520 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:10:51 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: The Application mounted at '' has an empty config. 
2026-03-10T13:10:51.520 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:10:51 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: 2026-03-10T13:10:51.520 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:10:51 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: [10/Mar/2026:13:10:51] ENGINE Serving on http://:::9283 2026-03-10T13:10:51.520 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:10:51 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: [10/Mar/2026:13:10:51] ENGINE Bus STARTED 2026-03-10T13:10:51.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:10:51 vm08 ceph-mon[49535]: pgmap v160: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:10:51.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:10:51 vm08 ceph-mon[49535]: Standby manager daemon x restarted 2026-03-10T13:10:51.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:10:51 vm08 ceph-mon[49535]: Standby manager daemon x started 2026-03-10T13:10:51.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:10:51 vm08 ceph-mon[49535]: from='mgr.? 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-10T13:10:51.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:10:51 vm08 ceph-mon[49535]: from='mgr.? 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-10T13:10:51.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:10:51 vm08 ceph-mon[49535]: from='mgr.? 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-10T13:10:51.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:10:51 vm08 ceph-mon[49535]: from='mgr.? 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-10T13:10:51.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:10:51 vm00 ceph-mon[47364]: pgmap v160: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:10:51.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:10:51 vm00 ceph-mon[47364]: Standby manager daemon x restarted 2026-03-10T13:10:51.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:10:51 vm00 ceph-mon[47364]: Standby manager daemon x started 2026-03-10T13:10:51.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:10:51 vm00 ceph-mon[47364]: from='mgr.? 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-10T13:10:51.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:10:51 vm00 ceph-mon[47364]: from='mgr.? 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-10T13:10:51.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:10:51 vm00 ceph-mon[47364]: from='mgr.? 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-10T13:10:51.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:10:51 vm00 ceph-mon[47364]: from='mgr.? 
192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-10T13:10:51.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:10:51 vm00 ceph-mon[51670]: pgmap v160: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:10:51.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:10:51 vm00 ceph-mon[51670]: Standby manager daemon x restarted 2026-03-10T13:10:51.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:10:51 vm00 ceph-mon[51670]: Standby manager daemon x started 2026-03-10T13:10:51.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:10:51 vm00 ceph-mon[51670]: from='mgr.? 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-10T13:10:51.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:10:51 vm00 ceph-mon[51670]: from='mgr.? 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-10T13:10:51.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:10:51 vm00 ceph-mon[51670]: from='mgr.? 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-10T13:10:51.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:10:51 vm00 ceph-mon[51670]: from='mgr.? 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-10T13:10:52.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:10:52 vm00 ceph-mon[47364]: mgrmap e21: y(active, since 4m), standbys: x 2026-03-10T13:10:52.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:10:52 vm00 ceph-mon[51670]: mgrmap e21: y(active, since 4m), standbys: x 2026-03-10T13:10:52.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:10:52 vm08 ceph-mon[49535]: mgrmap e21: y(active, since 4m), standbys: x 2026-03-10T13:10:53.089 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:10:52 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:10:52.825Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=7 err="Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:10:53.398 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:10:53 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:10:53] "GET /metrics HTTP/1.1" 200 207568 "" "Prometheus/2.33.4" 2026-03-10T13:10:53.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:10:53 vm00 ceph-mon[47364]: pgmap v161: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:10:53.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:10:53 vm00 ceph-mon[51670]: pgmap v161: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:10:53.753 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:10:53 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=error ts=2026-03-10T13:10:53.517Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post 
\"https://192.168.123.108:8443/api/prometheus_receiver\": dial tcp 192.168.123.108:8443: connect: connection refused; ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:10:53.753 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:10:53 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=error ts=2026-03-10T13:10:53.517Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:10:53.753 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:10:53 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:10:53.518Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:10:53.753 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:10:53 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:10:53.518Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:10:53.753 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:10:53 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:10:53.519Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:10:53.753 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:10:53 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:10:53.519Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:10:53.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:10:53 vm08 ceph-mon[49535]: pgmap v161: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:10:55.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:10:55 vm00 ceph-mon[47364]: pgmap v162: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:10:55.753 
INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:10:55 vm00 ceph-mon[51670]: pgmap v162: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:10:55.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:10:55 vm08 ceph-mon[49535]: pgmap v162: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:10:56.770 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:10:56 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:10:56] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-10T13:10:57.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:10:57 vm00 ceph-mon[47364]: pgmap v163: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:10:57.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:10:57 vm00 ceph-mon[51670]: pgmap v163: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:10:57.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:10:57 vm08 ceph-mon[49535]: pgmap v163: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:10:59.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:10:59 vm00 ceph-mon[47364]: pgmap v164: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:10:59.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:10:59 vm00 ceph-mon[51670]: pgmap v164: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:10:59.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:10:59 vm08 ceph-mon[49535]: pgmap v164: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:11:00.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:11:00 vm00 ceph-mon[47364]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:11:00.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:11:00 vm00 ceph-mon[51670]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:11:00.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:11:00 vm08 ceph-mon[49535]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:11:01.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:11:01 vm00 ceph-mon[47364]: pgmap v165: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:11:01.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:11:01 vm00 ceph-mon[51670]: pgmap v165: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:11:01.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:11:01 vm08 ceph-mon[49535]: pgmap v165: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:11:03.427 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:11:03 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:11:03] "GET /metrics HTTP/1.1" 200 207571 "" "Prometheus/2.33.4" 2026-03-10T13:11:03.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:11:03 
vm00 ceph-mon[47364]: pgmap v166: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 767 B/s rd, 0 op/s 2026-03-10T13:11:03.753 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:11:03 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=error ts=2026-03-10T13:11:03.517Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:11:03.753 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:11:03 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=error ts=2026-03-10T13:11:03.517Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:11:03.753 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:11:03 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:11:03.519Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:11:03.753 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:11:03 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:11:03.519Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:11:03.753 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:11:03 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:11:03.519Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:11:03.753 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:11:03 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:11:03.520Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot 
validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:11:03.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:11:03 vm00 ceph-mon[51670]: pgmap v166: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 767 B/s rd, 0 op/s 2026-03-10T13:11:03.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:11:03 vm08 ceph-mon[49535]: pgmap v166: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 767 B/s rd, 0 op/s 2026-03-10T13:11:05.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:11:05 vm00 ceph-mon[47364]: pgmap v167: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 767 B/s rd, 0 op/s 2026-03-10T13:11:05.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:11:05 vm00 ceph-mon[51670]: pgmap v167: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 767 B/s rd, 0 op/s 2026-03-10T13:11:05.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:11:05 vm08 ceph-mon[49535]: pgmap v167: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 767 B/s rd, 0 op/s 2026-03-10T13:11:06.770 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:11:06 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:11:06] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-10T13:11:07.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:11:07 vm00 ceph-mon[47364]: pgmap v168: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:11:07.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:11:07 vm00 ceph-mon[51670]: pgmap v168: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:11:07.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:11:07 vm08 ceph-mon[49535]: pgmap v168: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:11:09.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:11:09 vm00 ceph-mon[47364]: pgmap v169: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 767 B/s rd, 0 op/s 2026-03-10T13:11:09.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:11:09 vm00 ceph-mon[51670]: pgmap v169: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 767 B/s rd, 0 op/s 2026-03-10T13:11:09.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:11:09 vm08 ceph-mon[49535]: pgmap v169: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 767 B/s rd, 0 op/s 2026-03-10T13:11:10.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:11:10 vm00 ceph-mon[47364]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:11:10.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:11:10 vm00 ceph-mon[51670]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:11:10.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:11:10 vm08 ceph-mon[49535]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:11:11.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:11:11 vm00 ceph-mon[47364]: pgmap v170: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 
2026-03-10T13:11:11.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:11:11 vm00 ceph-mon[51670]: pgmap v170: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:11:11.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:11:11 vm08 ceph-mon[49535]: pgmap v170: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:11:13.499 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:11:13 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:11:13] "GET /metrics HTTP/1.1" 200 207571 "" "Prometheus/2.33.4" 2026-03-10T13:11:13.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:11:13 vm00 ceph-mon[47364]: pgmap v171: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:11:13.752 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:11:13 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=error ts=2026-03-10T13:11:13.518Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:11:13.752 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:11:13 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=error ts=2026-03-10T13:11:13.518Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:11:13.752 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:11:13 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:11:13.519Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:11:13.753 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:11:13 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:11:13.520Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:11:13.753 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:11:13 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn 
ts=2026-03-10T13:11:13.520Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:11:13.753 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:11:13 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:11:13.521Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:11:13.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:11:13 vm00 ceph-mon[51670]: pgmap v171: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:11:13.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:11:13 vm08 ceph-mon[49535]: pgmap v171: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:11:15.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:11:15 vm08 ceph-mon[49535]: pgmap v172: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:11:16.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:11:15 vm00 ceph-mon[47364]: pgmap v172: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:11:16.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:11:15 vm00 ceph-mon[51670]: pgmap v172: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:11:16.770 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:11:16 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:11:16] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-10T13:11:18.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:11:17 vm00 ceph-mon[47364]: pgmap v173: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:11:18.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:11:17 vm00 ceph-mon[51670]: pgmap v173: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:11:18.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:11:17 vm08 ceph-mon[49535]: pgmap v173: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:11:19.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:11:19 vm00 ceph-mon[47364]: pgmap v174: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:11:19.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:11:19 vm00 ceph-mon[51670]: pgmap v174: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:11:20.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:11:19 vm08 ceph-mon[49535]: pgmap v174: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:11:21.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:11:20 vm00 ceph-mon[47364]: from='client.14736 -' 
entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:11:21.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:11:20 vm00 ceph-mon[51670]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:11:21.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:11:20 vm08 ceph-mon[49535]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:11:22.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:11:21 vm00 ceph-mon[47364]: pgmap v175: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:11:22.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:11:21 vm00 ceph-mon[51670]: pgmap v175: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:11:22.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:11:21 vm08 ceph-mon[49535]: pgmap v175: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:11:23.502 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:11:23 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:11:23] "GET /metrics HTTP/1.1" 200 207570 "" "Prometheus/2.33.4" 2026-03-10T13:11:24.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:11:23 vm00 ceph-mon[47364]: pgmap v176: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:11:24.002 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:11:23 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=error ts=2026-03-10T13:11:23.518Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:11:24.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:11:23 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=error ts=2026-03-10T13:11:23.518Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:11:24.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:11:23 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:11:23.519Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot 
validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:11:24.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:11:23 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:11:23.520Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:11:24.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:11:23 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:11:23.520Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:11:24.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:11:23 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:11:23.521Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:11:24.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:11:23 vm00 ceph-mon[51670]: pgmap v176: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:11:24.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:11:23 vm08 ceph-mon[49535]: pgmap v176: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:11:26.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:11:25 vm00 ceph-mon[47364]: pgmap v177: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:11:26.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:11:25 vm00 ceph-mon[51670]: pgmap v177: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:11:26.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:11:25 vm08 ceph-mon[49535]: pgmap v177: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:11:26.770 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:11:26 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:11:26] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-10T13:11:28.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:11:27 vm00 ceph-mon[47364]: pgmap v178: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:11:28.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:11:27 vm00 ceph-mon[51670]: pgmap v178: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:11:28.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:11:27 vm08 ceph-mon[49535]: pgmap v178: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 
2026-03-10T13:11:30.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:11:29 vm00 ceph-mon[47364]: pgmap v179: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:11:30.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:11:29 vm00 ceph-mon[51670]: pgmap v179: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:11:30.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:11:29 vm08 ceph-mon[49535]: pgmap v179: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:11:31.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:11:30 vm00 ceph-mon[47364]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:11:31.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:11:30 vm00 ceph-mon[47364]: pgmap v180: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:11:31.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:11:30 vm00 ceph-mon[51670]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:11:31.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:11:30 vm00 ceph-mon[51670]: pgmap v180: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:11:31.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:11:30 vm08 ceph-mon[49535]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:11:31.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:11:30 vm08 ceph-mon[49535]: pgmap v180: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:11:33.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:11:32 vm00 ceph-mon[47364]: pgmap v181: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:11:33.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:11:32 vm00 ceph-mon[51670]: pgmap v181: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:11:33.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:11:32 vm08 ceph-mon[49535]: pgmap v181: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:11:33.502 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:11:33 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:11:33] "GET /metrics HTTP/1.1" 200 207569 "" "Prometheus/2.33.4" 2026-03-10T13:11:34.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:11:33 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=error ts=2026-03-10T13:11:33.519Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain 
any IP SANs" 2026-03-10T13:11:34.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:11:33 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=error ts=2026-03-10T13:11:33.519Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:11:34.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:11:33 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:11:33.520Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:11:34.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:11:33 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:11:33.521Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:11:34.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:11:33 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:11:33.521Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:11:34.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:11:33 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:11:33.522Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:11:35.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:11:34 vm00 ceph-mon[47364]: pgmap v182: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:11:35.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:11:34 vm00 ceph-mon[51670]: pgmap v182: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:11:35.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:11:34 vm08 ceph-mon[49535]: pgmap v182: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:11:36.770 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:11:36 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: 
::ffff:192.168.123.108 - - [10/Mar/2026:13:11:36] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-10T13:11:37.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:11:36 vm00 ceph-mon[47364]: pgmap v183: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:11:37.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:11:36 vm00 ceph-mon[51670]: pgmap v183: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:11:37.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:11:36 vm08 ceph-mon[49535]: pgmap v183: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:11:39.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:11:38 vm00 ceph-mon[47364]: pgmap v184: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:11:39.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:11:38 vm00 ceph-mon[51670]: pgmap v184: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:11:39.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:11:38 vm08 ceph-mon[49535]: pgmap v184: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:11:40.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:11:39 vm00 ceph-mon[47364]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:11:40.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:11:39 vm00 ceph-mon[51670]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:11:40.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:11:39 vm08 ceph-mon[49535]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:11:41.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:11:40 vm00 ceph-mon[47364]: pgmap v185: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:11:41.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:11:40 vm00 ceph-mon[51670]: pgmap v185: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:11:41.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:11:40 vm08 ceph-mon[49535]: pgmap v185: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:11:43.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:11:42 vm00 ceph-mon[47364]: pgmap v186: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:11:43.253 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:11:43 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:11:43] "GET /metrics HTTP/1.1" 200 207569 "" "Prometheus/2.33.4" 2026-03-10T13:11:43.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:11:42 vm00 ceph-mon[51670]: pgmap v186: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:11:43.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:11:42 vm08 ceph-mon[49535]: pgmap v186: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 
B/s rd, 0 op/s 2026-03-10T13:11:43.917 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:11:43 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=error ts=2026-03-10T13:11:43.519Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:11:43.917 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:11:43 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=error ts=2026-03-10T13:11:43.520Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:11:43.917 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:11:43 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:11:43.521Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:11:43.917 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:11:43 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:11:43.521Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:11:43.917 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:11:43 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:11:43.522Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:11:43.917 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:11:43 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:11:43.522Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:11:44.252 
INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:11:43 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T13:11:44.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:11:43 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T13:11:44.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:11:43 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T13:11:44.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:11:43 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T13:11:44.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:11:43 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T13:11:44.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:11:43 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T13:11:44.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:11:43 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T13:11:44.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:11:43 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T13:11:44.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:11:43 vm08 ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T13:11:44.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:11:43 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T13:11:44.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:11:43 vm08 ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T13:11:44.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:11:43 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T13:11:45.171 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:11:44 vm08 ceph-mon[49535]: pgmap v187: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:11:45.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:11:44 vm00 ceph-mon[47364]: pgmap v187: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:11:45.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:11:44 vm00 ceph-mon[51670]: pgmap v187: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 
160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:11:46.770 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:11:46 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:11:46] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-10T13:11:47.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:11:46 vm00 ceph-mon[47364]: pgmap v188: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:11:47.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:11:46 vm00 ceph-mon[51670]: pgmap v188: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:11:47.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:11:46 vm08 ceph-mon[49535]: pgmap v188: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:11:49.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:11:48 vm00 ceph-mon[51670]: pgmap v189: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:11:49.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:11:48 vm00 ceph-mon[47364]: pgmap v189: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:11:49.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:11:48 vm08 ceph-mon[49535]: pgmap v189: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:11:49.947 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:11:49 vm08 ceph-mon[49535]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:11:49.947 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:11:49 vm08 ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:11:49.947 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:11:49 vm08 ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:11:50.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:11:49 vm00 ceph-mon[51670]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:11:50.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:11:49 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:11:50.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:11:49 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:11:50.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:11:49 vm00 ceph-mon[47364]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:11:50.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:11:49 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:11:50.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:11:49 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": 
"client.admin"}]: dispatch 2026-03-10T13:11:51.118 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:11:50 vm08 ceph-mon[49535]: pgmap v190: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:11:51.118 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:11:50 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:11:51.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:11:50 vm00 ceph-mon[51670]: pgmap v190: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:11:51.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:11:50 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:11:51.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:11:50 vm00 ceph-mon[47364]: pgmap v190: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:11:51.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:11:50 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:11:52.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:11:52 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:11:52.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:11:52 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:11:52.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:11:52 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:11:53.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:11:53 vm00 ceph-mon[47364]: pgmap v191: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:11:53.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:11:53 vm00 ceph-mon[51670]: pgmap v191: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:11:53.503 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:11:53 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:11:53] "GET /metrics HTTP/1.1" 200 207568 "" "Prometheus/2.33.4" 2026-03-10T13:11:53.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:11:53 vm08 ceph-mon[49535]: pgmap v191: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:11:54.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:11:53 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=error ts=2026-03-10T13:11:53.520Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:11:54.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:11:53 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=error ts=2026-03-10T13:11:53.521Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 
192.168.123.100 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:11:54.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:11:53 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:11:53.522Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:11:54.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:11:53 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:11:53.523Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:11:54.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:11:53 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:11:53.523Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:11:54.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:11:53 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:11:53.525Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:11:55.635 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:11:55 vm00 ceph-mon[47364]: pgmap v192: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:11:55.635 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:11:55 vm00 ceph-mon[51670]: pgmap v192: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:11:55.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:11:55 vm08 ceph-mon[49535]: pgmap v192: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:11:56.770 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:11:56 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:11:56] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-10T13:11:57.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:11:57 vm00 ceph-mon[47364]: pgmap v193: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:11:57.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:11:57 vm00 ceph-mon[51670]: pgmap v193: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB 
used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:11:57.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:11:57 vm08 ceph-mon[49535]: pgmap v193: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:11:59.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:11:59 vm00 ceph-mon[47364]: pgmap v194: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:11:59.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:11:59 vm00 ceph-mon[51670]: pgmap v194: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:11:59.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:11:59 vm08 ceph-mon[49535]: pgmap v194: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:12:01.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:12:00 vm00 ceph-mon[47364]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:12:01.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:12:00 vm00 ceph-mon[51670]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:12:01.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:12:00 vm08 ceph-mon[49535]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:12:02.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:12:01 vm00 ceph-mon[47364]: pgmap v195: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:12:02.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:12:01 vm00 ceph-mon[51670]: pgmap v195: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:12:02.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:12:01 vm08 ceph-mon[49535]: pgmap v195: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:12:03.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:12:02 vm00 ceph-mon[47364]: pgmap v196: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:12:03.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:12:02 vm00 ceph-mon[51670]: pgmap v196: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:12:03.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:12:02 vm08 ceph-mon[49535]: pgmap v196: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:12:03.502 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:12:03 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:12:03] "GET /metrics HTTP/1.1" 200 207571 "" "Prometheus/2.33.4" 2026-03-10T13:12:04.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:12:03 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=error ts=2026-03-10T13:12:03.522Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 
192.168.123.108 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:12:04.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:12:03 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=error ts=2026-03-10T13:12:03.524Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:12:04.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:12:03 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:12:03.524Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:12:04.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:12:03 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:12:03.525Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:12:04.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:12:03 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:12:03.526Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:12:04.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:12:03 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:12:03.527Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:12:05.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:12:04 vm00 ceph-mon[47364]: pgmap v197: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:12:05.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:12:04 vm00 ceph-mon[51670]: pgmap v197: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:12:05.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:12:04 vm08 
ceph-mon[49535]: pgmap v197: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:12:06.770 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:12:06 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:12:06] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-10T13:12:07.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:12:06 vm00 ceph-mon[47364]: pgmap v198: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:12:07.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:12:06 vm00 ceph-mon[51670]: pgmap v198: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:12:07.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:12:06 vm08 ceph-mon[49535]: pgmap v198: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:12:09.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:12:08 vm00 ceph-mon[47364]: pgmap v199: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:12:09.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:12:08 vm00 ceph-mon[51670]: pgmap v199: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:12:09.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:12:08 vm08 ceph-mon[49535]: pgmap v199: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:12:10.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:12:10 vm08 ceph-mon[49535]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:12:10.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:12:10 vm00 ceph-mon[47364]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:12:10.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:12:10 vm00 ceph-mon[51670]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:12:11.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:12:11 vm08 ceph-mon[49535]: pgmap v200: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:12:11.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:12:11 vm00 ceph-mon[47364]: pgmap v200: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:12:11.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:12:11 vm00 ceph-mon[51670]: pgmap v200: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:12:13.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:12:13 vm08 ceph-mon[49535]: pgmap v201: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:12:13.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:12:13 vm00 ceph-mon[47364]: pgmap v201: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:12:13.502 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:12:13 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: ::ffff:192.168.123.108 - - 
[10/Mar/2026:13:12:13] "GET /metrics HTTP/1.1" 200 207571 "" "Prometheus/2.33.4" 2026-03-10T13:12:13.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:12:13 vm00 ceph-mon[51670]: pgmap v201: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:12:14.002 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:12:13 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=error ts=2026-03-10T13:12:13.522Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:12:14.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:12:13 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=error ts=2026-03-10T13:12:13.524Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:12:14.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:12:13 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:12:13.524Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:12:14.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:12:13 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:12:13.525Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:12:14.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:12:13 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:12:13.526Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:12:14.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:12:13 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:12:13.527Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard 
integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:12:15.308 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:12:15 vm08 ceph-mon[49535]: pgmap v202: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:12:15.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:12:15 vm00 ceph-mon[47364]: pgmap v202: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:12:15.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:12:15 vm00 ceph-mon[51670]: pgmap v202: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:12:16.770 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:12:16 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:12:16] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-10T13:12:17.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:12:17 vm00 ceph-mon[47364]: pgmap v203: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:12:17.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:12:17 vm00 ceph-mon[51670]: pgmap v203: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:12:17.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:12:17 vm08 ceph-mon[49535]: pgmap v203: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:12:19.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:12:19 vm00 ceph-mon[47364]: pgmap v204: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:12:19.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:12:19 vm00 ceph-mon[51670]: pgmap v204: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:12:19.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:12:19 vm08 ceph-mon[49535]: pgmap v204: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:12:20.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:12:20 vm00 ceph-mon[47364]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:12:20.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:12:20 vm00 ceph-mon[51670]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:12:20.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:12:20 vm08 ceph-mon[49535]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:12:21.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:12:21 vm00 ceph-mon[47364]: pgmap v205: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:12:21.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:12:21 vm00 ceph-mon[51670]: pgmap v205: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:12:21.520 
INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:12:21 vm08 ceph-mon[49535]: pgmap v205: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:12:23.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:12:23 vm00 ceph-mon[47364]: pgmap v206: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:12:23.502 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:12:23 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:12:23] "GET /metrics HTTP/1.1" 200 207572 "" "Prometheus/2.33.4" 2026-03-10T13:12:23.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:12:23 vm00 ceph-mon[51670]: pgmap v206: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:12:23.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:12:23 vm08 ceph-mon[49535]: pgmap v206: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:12:24.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:12:23 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=error ts=2026-03-10T13:12:23.523Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:12:24.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:12:23 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=error ts=2026-03-10T13:12:23.525Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:12:24.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:12:23 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:12:23.525Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:12:24.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:12:23 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:12:23.525Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP 
SANs" 2026-03-10T13:12:24.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:12:23 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:12:23.526Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:12:24.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:12:23 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:12:23.526Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:12:25.439 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:12:25 vm08 ceph-mon[49535]: pgmap v207: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:12:25.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:12:25 vm00 ceph-mon[47364]: pgmap v207: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:12:25.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:12:25 vm00 ceph-mon[51670]: pgmap v207: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:12:26.770 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:12:26 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:12:26] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-10T13:12:27.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:12:27 vm00 ceph-mon[47364]: pgmap v208: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:12:27.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:12:27 vm00 ceph-mon[51670]: pgmap v208: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:12:27.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:12:27 vm08 ceph-mon[49535]: pgmap v208: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:12:29.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:12:29 vm00 ceph-mon[47364]: pgmap v209: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:12:29.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:12:29 vm00 ceph-mon[51670]: pgmap v209: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:12:29.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:12:29 vm08 ceph-mon[49535]: pgmap v209: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:12:30.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:12:30 vm00 ceph-mon[47364]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:12:30.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:12:30 vm00 ceph-mon[51670]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' 
cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:12:30.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:12:30 vm08 ceph-mon[49535]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:12:31.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:12:31 vm00 ceph-mon[47364]: pgmap v210: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:12:31.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:12:31 vm00 ceph-mon[51670]: pgmap v210: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:12:31.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:12:31 vm08 ceph-mon[49535]: pgmap v210: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:12:33.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:12:33 vm00 ceph-mon[47364]: pgmap v211: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:12:33.502 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:12:33 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:12:33] "GET /metrics HTTP/1.1" 200 207567 "" "Prometheus/2.33.4" 2026-03-10T13:12:33.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:12:33 vm00 ceph-mon[51670]: pgmap v211: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:12:33.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:12:33 vm08 ceph-mon[49535]: pgmap v211: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:12:34.002 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:12:33 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=error ts=2026-03-10T13:12:33.524Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:12:34.002 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:12:33 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=error ts=2026-03-10T13:12:33.525Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:12:34.002 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:12:33 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:12:33.525Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard 
integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:12:34.002 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:12:33 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:12:33.525Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:12:34.002 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:12:33 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:12:33.526Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:12:34.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:12:33 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:12:33.526Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:12:35.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:12:35 vm00 ceph-mon[47364]: pgmap v212: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:12:35.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:12:35 vm00 ceph-mon[51670]: pgmap v212: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:12:35.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:12:35 vm08 ceph-mon[49535]: pgmap v212: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:12:36.770 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:12:36 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:12:36] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-10T13:12:37.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:12:37 vm00 ceph-mon[47364]: pgmap v213: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:12:37.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:12:37 vm00 ceph-mon[51670]: pgmap v213: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:12:37.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:12:37 vm08 ceph-mon[49535]: pgmap v213: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:12:39.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:12:39 vm00 ceph-mon[47364]: pgmap v214: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:12:39.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 
13:12:39 vm00 ceph-mon[51670]: pgmap v214: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:12:39.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:12:39 vm08 ceph-mon[49535]: pgmap v214: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:12:40.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:12:40 vm00 ceph-mon[47364]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:12:40.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:12:40 vm00 ceph-mon[51670]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:12:40.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:12:40 vm08 ceph-mon[49535]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:12:41.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:12:41 vm00 ceph-mon[47364]: pgmap v215: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:12:41.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:12:41 vm00 ceph-mon[51670]: pgmap v215: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:12:41.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:12:41 vm08 ceph-mon[49535]: pgmap v215: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:12:43.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:12:43 vm00 ceph-mon[47364]: pgmap v216: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:12:43.503 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:12:43 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:12:43] "GET /metrics HTTP/1.1" 200 207567 "" "Prometheus/2.33.4" 2026-03-10T13:12:43.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:12:43 vm00 ceph-mon[51670]: pgmap v216: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:12:43.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:12:43 vm08 ceph-mon[49535]: pgmap v216: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:12:44.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:12:43 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=error ts=2026-03-10T13:12:43.525Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:12:44.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:12:43 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=error ts=2026-03-10T13:12:43.526Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts 
failed" num_alerts=4 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:12:44.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:12:43 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:12:43.526Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:12:44.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:12:43 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:12:43.527Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:12:44.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:12:43 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:12:43.528Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:12:44.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:12:43 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:12:43.529Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:12:44.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:12:44 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T13:12:44.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:12:44 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T13:12:44.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:12:44 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T13:12:44.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:12:44 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T13:12:44.503 
INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:12:44 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T13:12:44.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:12:44 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T13:12:44.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:12:44 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T13:12:44.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:12:44 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T13:12:44.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:12:44 vm08 ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T13:12:44.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:12:44 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T13:12:44.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:12:44 vm08 ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T13:12:44.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:12:44 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T13:12:45.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:12:45 vm00 ceph-mon[47364]: pgmap v217: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:12:45.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:12:45 vm00 ceph-mon[51670]: pgmap v217: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:12:45.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:12:45 vm08 ceph-mon[49535]: pgmap v217: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:12:46.770 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:12:46 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:12:46] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-10T13:12:47.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:12:47 vm00 ceph-mon[47364]: pgmap v218: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:12:47.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:12:47 vm00 ceph-mon[51670]: pgmap v218: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:12:47.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:12:47 vm08 ceph-mon[49535]: pgmap v218: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:12:49.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 
13:12:49 vm00 ceph-mon[47364]: pgmap v219: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:12:49.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:12:49 vm00 ceph-mon[51670]: pgmap v219: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:12:49.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:12:49 vm08 ceph-mon[49535]: pgmap v219: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:12:51.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:12:51 vm00 ceph-mon[47364]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:12:51.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:12:51 vm00 ceph-mon[47364]: pgmap v220: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:12:51.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:12:51 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:12:51.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:12:51 vm00 ceph-mon[47364]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:12:51.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:12:51 vm00 ceph-mon[51670]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:12:51.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:12:51 vm00 ceph-mon[51670]: pgmap v220: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:12:51.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:12:51 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:12:51.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:12:51 vm00 ceph-mon[51670]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:12:51.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:12:51 vm08 ceph-mon[49535]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:12:51.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:12:51 vm08 ceph-mon[49535]: pgmap v220: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:12:51.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:12:51 vm08 ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:12:51.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:12:51 vm08 ceph-mon[49535]: from='mgr.24298 192.168.123.100:0/3272123608' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:12:52.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:12:52 vm00 ceph-mon[47364]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:12:52.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:12:52 vm00 ceph-mon[51670]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:12:52.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 
10 13:12:52 vm08 ceph-mon[49535]: from='mgr.24298 ' entity='mgr.y' 2026-03-10T13:12:53.369 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:12:53 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:12:53] "GET /metrics HTTP/1.1" 200 207564 "" "Prometheus/2.33.4" 2026-03-10T13:12:53.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:12:53 vm00 ceph-mon[47364]: pgmap v221: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:12:53.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:12:53 vm00 ceph-mon[51670]: pgmap v221: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:12:53.753 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:12:53 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=error ts=2026-03-10T13:12:53.525Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:12:53.753 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:12:53 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=error ts=2026-03-10T13:12:53.526Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:12:53.753 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:12:53 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:12:53.527Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:12:53.753 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:12:53 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:12:53.527Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:12:53.753 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:12:53 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:12:53.527Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry 
later" attempts=1 err="Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:12:53.753 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:12:53 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:12:53.528Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:12:53.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:12:53 vm08 ceph-mon[49535]: pgmap v221: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:12:55.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:12:55 vm00 ceph-mon[47364]: pgmap v222: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:12:55.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:12:55 vm00 ceph-mon[51670]: pgmap v222: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:12:55.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:12:55 vm08 ceph-mon[49535]: pgmap v222: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:12:56.770 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:12:56 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:12:56] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-10T13:12:57.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:12:57 vm00 ceph-mon[47364]: pgmap v223: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:12:57.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:12:57 vm00 ceph-mon[51670]: pgmap v223: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:12:57.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:12:57 vm08 ceph-mon[49535]: pgmap v223: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:12:59.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:12:59 vm00 ceph-mon[47364]: pgmap v224: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:12:59.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:12:59 vm00 ceph-mon[51670]: pgmap v224: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:12:59.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:12:59 vm08 ceph-mon[49535]: pgmap v224: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:13:01.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:01 vm00 ceph-mon[47364]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:13:01.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:01 vm00 ceph-mon[47364]: pgmap v225: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:13:01.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 
13:13:01 vm00 ceph-mon[51670]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:13:01.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:01 vm00 ceph-mon[51670]: pgmap v225: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:13:01.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:01 vm08 ceph-mon[49535]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:13:01.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:01 vm08 ceph-mon[49535]: pgmap v225: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:13:03.400 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:13:03 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:13:03] "GET /metrics HTTP/1.1" 200 207573 "" "Prometheus/2.33.4" 2026-03-10T13:13:03.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:03 vm00 ceph-mon[47364]: pgmap v226: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:13:03.752 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:13:03 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=error ts=2026-03-10T13:13:03.526Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:13:03.753 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:13:03 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=error ts=2026-03-10T13:13:03.526Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:13:03.753 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:13:03 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:13:03.527Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:13:03.753 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:13:03 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:13:03.528Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt 
failed, will retry later" attempts=1 err="Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:13:03.753 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:13:03 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:13:03.528Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:13:03.753 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:13:03 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:13:03.529Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:13:03.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:03 vm00 ceph-mon[51670]: pgmap v226: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:13:03.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:03 vm08 ceph-mon[49535]: pgmap v226: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:13:05.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:05 vm00 ceph-mon[47364]: pgmap v227: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:13:05.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:05 vm00 ceph-mon[51670]: pgmap v227: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:13:05.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:05 vm08 ceph-mon[49535]: pgmap v227: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:13:06.770 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:13:06 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:13:06] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-10T13:13:07.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:07 vm00 ceph-mon[47364]: pgmap v228: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:13:07.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:07 vm00 ceph-mon[51670]: pgmap v228: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:13:07.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:07 vm08 ceph-mon[49535]: pgmap v228: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:13:09.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:09 vm00 ceph-mon[47364]: pgmap v229: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:13:09.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:09 vm00 ceph-mon[51670]: pgmap v229: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 
B/s rd, 0 op/s 2026-03-10T13:13:09.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:09 vm08 ceph-mon[49535]: pgmap v229: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:13:11.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:11 vm00 ceph-mon[47364]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:13:11.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:11 vm00 ceph-mon[47364]: pgmap v230: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:13:11.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:11 vm00 ceph-mon[51670]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:13:11.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:11 vm00 ceph-mon[51670]: pgmap v230: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:13:11.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:11 vm08 ceph-mon[49535]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:13:11.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:11 vm08 ceph-mon[49535]: pgmap v230: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:13:13.425 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:13:13 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:13:13] "GET /metrics HTTP/1.1" 200 207573 "" "Prometheus/2.33.4" 2026-03-10T13:13:13.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:13 vm00 ceph-mon[47364]: pgmap v231: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:13:13.752 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:13:13 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=error ts=2026-03-10T13:13:13.527Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:13:13.753 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:13:13 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=error ts=2026-03-10T13:13:13.527Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:13:13.753 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 
13:13:13 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:13:13.528Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:13:13.753 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:13:13 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:13:13.529Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:13:13.753 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:13:13 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:13:13.529Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:13:13.753 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:13:13 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:13:13.530Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:13:13.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:13 vm00 ceph-mon[51670]: pgmap v231: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:13:13.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:13 vm08 ceph-mon[49535]: pgmap v231: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:13:15.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:15 vm00 ceph-mon[47364]: pgmap v232: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:13:15.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:15 vm00 ceph-mon[51670]: pgmap v232: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:13:15.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:15 vm08 ceph-mon[49535]: pgmap v232: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:13:16.770 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:13:16 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:13:16] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-10T13:13:17.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:17 vm08 ceph-mon[49535]: pgmap v233: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:13:17.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:17 vm00 ceph-mon[47364]: 
pgmap v233: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:13:17.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:17 vm00 ceph-mon[51670]: pgmap v233: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:13:19.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:19 vm00 ceph-mon[47364]: pgmap v234: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:13:19.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:19 vm00 ceph-mon[51670]: pgmap v234: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:13:19.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:19 vm08 ceph-mon[49535]: pgmap v234: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:13:19.877 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch ps' 2026-03-10T13:13:20.334 INFO:teuthology.orchestra.run.vm00.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T13:13:20.334 INFO:teuthology.orchestra.run.vm00.stdout:alertmanager.a vm00 *:9093,9094 running (5m) 2m ago 6m 24.5M - ba2b418f427c da91a70a93ac 2026-03-10T13:13:20.334 INFO:teuthology.orchestra.run.vm00.stdout:grafana.a vm08 *:3000 running (6m) 89s ago 6m 49.1M - 8.3.5 dad864ee21e9 cc6207fccfd0 2026-03-10T13:13:20.334 INFO:teuthology.orchestra.run.vm00.stdout:iscsi.foo.vm00.dezodo vm00 running (6m) 2m ago 6m 50.5M - 3.5 e1d6a67b021e 68c149a419b4 2026-03-10T13:13:20.334 INFO:teuthology.orchestra.run.vm00.stdout:mgr.x vm08 *:8443,9283 running (2m) 89s ago 8m 486M - 19.2.3-678-ge911bdeb 654f31e6858e 62b908c184a8 2026-03-10T13:13:20.334 INFO:teuthology.orchestra.run.vm00.stdout:mgr.y vm00 *:9283 running (8m) 2m ago 8m 470M - 17.2.0 e1d6a67b021e b259475ee6d8 2026-03-10T13:13:20.334 INFO:teuthology.orchestra.run.vm00.stdout:mon.a vm00 running (8m) 2m ago 8m 53.0M 2048M 17.2.0 e1d6a67b021e f0e3f322471c 2026-03-10T13:13:20.334 INFO:teuthology.orchestra.run.vm00.stdout:mon.b vm08 running (8m) 89s ago 8m 41.3M 2048M 17.2.0 e1d6a67b021e d3c1458bc898 2026-03-10T13:13:20.334 INFO:teuthology.orchestra.run.vm00.stdout:mon.c vm00 running (8m) 2m ago 8m 39.1M 2048M 17.2.0 e1d6a67b021e d00b7fd44c23 2026-03-10T13:13:20.334 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.a vm00 *:9100 running (6m) 2m ago 6m 19.6M - 1dbe0e931976 439a263972f0 2026-03-10T13:13:20.334 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.b vm08 *:9100 running (6m) 89s ago 6m 22.3M - 1dbe0e931976 d5ba7ccd220b 2026-03-10T13:13:20.334 INFO:teuthology.orchestra.run.vm00.stdout:osd.0 vm00 running (7m) 2m ago 7m 49.4M 4096M 17.2.0 e1d6a67b021e 2919c7073fa7 2026-03-10T13:13:20.334 INFO:teuthology.orchestra.run.vm00.stdout:osd.1 vm00 running (7m) 2m ago 7m 53.7M 4096M 17.2.0 e1d6a67b021e 647927dc41ea 2026-03-10T13:13:20.334 INFO:teuthology.orchestra.run.vm00.stdout:osd.2 vm00 running (7m) 2m ago 7m 51.0M 4096M 17.2.0 e1d6a67b021e 1e417e82c2b9 2026-03-10T13:13:20.334 INFO:teuthology.orchestra.run.vm00.stdout:osd.3 vm00 running (7m) 2m ago 7m 48.2M 4096M 17.2.0 e1d6a67b021e 98bb6d7ea69f 2026-03-10T13:13:20.334 
INFO:teuthology.orchestra.run.vm00.stdout:osd.4 vm08 running (7m) 89s ago 7m 49.6M 4096M 17.2.0 e1d6a67b021e e349440ca776 2026-03-10T13:13:20.335 INFO:teuthology.orchestra.run.vm00.stdout:osd.5 vm08 running (7m) 89s ago 7m 51.8M 4096M 17.2.0 e1d6a67b021e 9a6a2a67cc99 2026-03-10T13:13:20.335 INFO:teuthology.orchestra.run.vm00.stdout:osd.6 vm08 running (6m) 89s ago 6m 47.3M 4096M 17.2.0 e1d6a67b021e c27676916d52 2026-03-10T13:13:20.335 INFO:teuthology.orchestra.run.vm00.stdout:osd.7 vm08 running (6m) 89s ago 6m 48.8M 4096M 17.2.0 e1d6a67b021e bf67951990a5 2026-03-10T13:13:20.335 INFO:teuthology.orchestra.run.vm00.stdout:prometheus.a vm08 *:9095 running (5m) 89s ago 6m 58.8M - 514e6a882f6e 0a921c94fbae 2026-03-10T13:13:20.335 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm00.tvlvzo vm00 *:8000 running (6m) 2m ago 6m 91.2M - 17.2.0 e1d6a67b021e be24eac16807 2026-03-10T13:13:20.335 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm08.ljayps vm08 *:8000 running (6m) 89s ago 6m 89.4M - 17.2.0 e1d6a67b021e 67b525427823 2026-03-10T13:13:20.407 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph versions' 2026-03-10T13:13:20.856 INFO:teuthology.orchestra.run.vm00.stdout:{ 2026-03-10T13:13:20.856 INFO:teuthology.orchestra.run.vm00.stdout: "mon": { 2026-03-10T13:13:20.856 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 3 2026-03-10T13:13:20.856 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T13:13:20.856 INFO:teuthology.orchestra.run.vm00.stdout: "mgr": { 2026-03-10T13:13:20.856 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 1, 2026-03-10T13:13:20.856 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 1 2026-03-10T13:13:20.856 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T13:13:20.856 INFO:teuthology.orchestra.run.vm00.stdout: "osd": { 2026-03-10T13:13:20.856 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 8 2026-03-10T13:13:20.856 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T13:13:20.856 INFO:teuthology.orchestra.run.vm00.stdout: "mds": {}, 2026-03-10T13:13:20.856 INFO:teuthology.orchestra.run.vm00.stdout: "rgw": { 2026-03-10T13:13:20.856 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 2 2026-03-10T13:13:20.856 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T13:13:20.856 INFO:teuthology.orchestra.run.vm00.stdout: "overall": { 2026-03-10T13:13:20.856 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 14, 2026-03-10T13:13:20.856 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 1 2026-03-10T13:13:20.856 INFO:teuthology.orchestra.run.vm00.stdout: } 2026-03-10T13:13:20.856 INFO:teuthology.orchestra.run.vm00.stdout:} 2026-03-10T13:13:20.918 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k 
/etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph -s' 2026-03-10T13:13:21.365 INFO:teuthology.orchestra.run.vm00.stdout: cluster: 2026-03-10T13:13:21.365 INFO:teuthology.orchestra.run.vm00.stdout: id: 98a3dada-1c81-11f1-89c9-d57c120f78d5 2026-03-10T13:13:21.365 INFO:teuthology.orchestra.run.vm00.stdout: health: HEALTH_OK 2026-03-10T13:13:21.365 INFO:teuthology.orchestra.run.vm00.stdout: 2026-03-10T13:13:21.365 INFO:teuthology.orchestra.run.vm00.stdout: services: 2026-03-10T13:13:21.365 INFO:teuthology.orchestra.run.vm00.stdout: mon: 3 daemons, quorum a,c,b (age 8m) 2026-03-10T13:13:21.365 INFO:teuthology.orchestra.run.vm00.stdout: mgr: y(active, since 6m), standbys: x 2026-03-10T13:13:21.365 INFO:teuthology.orchestra.run.vm00.stdout: osd: 8 osds: 8 up (since 6m), 8 in (since 6m) 2026-03-10T13:13:21.365 INFO:teuthology.orchestra.run.vm00.stdout: rgw: 2 daemons active (2 hosts, 1 zones) 2026-03-10T13:13:21.365 INFO:teuthology.orchestra.run.vm00.stdout: 2026-03-10T13:13:21.365 INFO:teuthology.orchestra.run.vm00.stdout: data: 2026-03-10T13:13:21.365 INFO:teuthology.orchestra.run.vm00.stdout: pools: 6 pools, 161 pgs 2026-03-10T13:13:21.365 INFO:teuthology.orchestra.run.vm00.stdout: objects: 209 objects, 457 KiB 2026-03-10T13:13:21.365 INFO:teuthology.orchestra.run.vm00.stdout: usage: 71 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:13:21.365 INFO:teuthology.orchestra.run.vm00.stdout: pgs: 161 active+clean 2026-03-10T13:13:21.365 INFO:teuthology.orchestra.run.vm00.stdout: 2026-03-10T13:13:21.365 INFO:teuthology.orchestra.run.vm00.stdout: io: 2026-03-10T13:13:21.365 INFO:teuthology.orchestra.run.vm00.stdout: client: 1.2 KiB/s rd, 1 op/s rd, 0 op/s wr 2026-03-10T13:13:21.365 INFO:teuthology.orchestra.run.vm00.stdout: 2026-03-10T13:13:21.425 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph health detail' 2026-03-10T13:13:21.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:21 vm00 ceph-mon[47364]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:13:21.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:21 vm00 ceph-mon[47364]: pgmap v235: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:13:21.663 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:21 vm00 ceph-mon[47364]: from='client.24751 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:13:21.664 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:21 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/1867663703' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:13:21.664 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:21 vm00 ceph-mon[47364]: from='client.? 
192.168.123.100:0/3509881425' entity='client.admin' cmd=[{"prefix": "status"}]: dispatch 2026-03-10T13:13:21.664 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:21 vm00 ceph-mon[51670]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:13:21.664 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:21 vm00 ceph-mon[51670]: pgmap v235: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:13:21.664 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:21 vm00 ceph-mon[51670]: from='client.24751 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:13:21.664 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:21 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/1867663703' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:13:21.664 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:21 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/3509881425' entity='client.admin' cmd=[{"prefix": "status"}]: dispatch 2026-03-10T13:13:21.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:21 vm08 ceph-mon[49535]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:13:21.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:21 vm08 ceph-mon[49535]: pgmap v235: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:13:21.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:21 vm08 ceph-mon[49535]: from='client.24751 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:13:21.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:21 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/1867663703' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:13:21.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:21 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/3509881425' entity='client.admin' cmd=[{"prefix": "status"}]: dispatch 2026-03-10T13:13:21.870 INFO:teuthology.orchestra.run.vm00.stdout:HEALTH_OK 2026-03-10T13:13:21.935 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph versions | jq -e '"'"'.mgr | length == 2'"'"'' 2026-03-10T13:13:22.434 INFO:teuthology.orchestra.run.vm00.stdout:true 2026-03-10T13:13:22.480 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph mgr fail' 2026-03-10T13:13:22.688 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:22 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/2451670725' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T13:13:22.688 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:22 vm00 ceph-mon[51670]: from='client.? 
192.168.123.100:0/3029579562' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:13:22.688 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:22 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/2451670725' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T13:13:22.688 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:22 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/3029579562' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:13:22.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:22 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/2451670725' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T13:13:22.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:22 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/3029579562' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:13:23.461 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:13:23 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:13:23] "GET /metrics HTTP/1.1" 200 207568 "" "Prometheus/2.33.4" 2026-03-10T13:13:23.461 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:23 vm00 ceph-mon[51670]: pgmap v236: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:13:23.461 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:23 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/2305011888' entity='client.admin' cmd=[{"prefix": "mgr fail"}]: dispatch 2026-03-10T13:13:23.461 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:23 vm00 ceph-mon[51670]: from='client.? ' entity='client.admin' cmd=[{"prefix": "mgr fail"}]: dispatch 2026-03-10T13:13:23.461 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:23 vm00 ceph-mon[51670]: osdmap e78: 8 total, 8 up, 8 in 2026-03-10T13:13:23.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:23 vm00 ceph-mon[47364]: pgmap v236: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:13:23.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:23 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/2305011888' entity='client.admin' cmd=[{"prefix": "mgr fail"}]: dispatch 2026-03-10T13:13:23.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:23 vm00 ceph-mon[47364]: from='client.? 
' entity='client.admin' cmd=[{"prefix": "mgr fail"}]: dispatch 2026-03-10T13:13:23.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:23 vm00 ceph-mon[47364]: osdmap e78: 8 total, 8 up, 8 in 2026-03-10T13:13:23.753 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:13:23 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=error ts=2026-03-10T13:13:23.527Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:13:23.753 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:13:23 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=error ts=2026-03-10T13:13:23.528Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:13:23.753 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:13:23 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:13:23.530Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:13:23.753 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:13:23 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:13:23.530Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:13:23.753 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:13:23 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:13:23.530Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:13:23.753 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:13:23 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:13:23.531Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post 
\"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:13:23.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:23 vm08 ceph-mon[49535]: pgmap v236: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:13:23.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:23 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/2305011888' entity='client.admin' cmd=[{"prefix": "mgr fail"}]: dispatch 2026-03-10T13:13:23.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:23 vm08 ceph-mon[49535]: from='client.? ' entity='client.admin' cmd=[{"prefix": "mgr fail"}]: dispatch 2026-03-10T13:13:23.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:23 vm08 ceph-mon[49535]: osdmap e78: 8 total, 8 up, 8 in 2026-03-10T13:13:23.990 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'sleep 180' 2026-03-10T13:13:24.186 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:13:23 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: 2026-03-10T13:13:23.922+0000 7ff225a52700 -1 mgr handle_mgr_map I was active but no longer am 2026-03-10T13:13:24.186 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:13:23 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: ignoring --setuser ceph since I am not root 2026-03-10T13:13:24.186 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:13:23 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: ignoring --setgroup ceph since I am not root 2026-03-10T13:13:24.186 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:13:24 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: 2026-03-10T13:13:24.091+0000 7f3dfad49000 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member 2026-03-10T13:13:24.187 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:13:24 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:13:24.004Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=2 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": dial tcp 192.168.123.100:8443: connect: connection refused" 2026-03-10T13:13:24.219 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:13:23 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: [10/Mar/2026:13:13:23] ENGINE Bus STOPPING 2026-03-10T13:13:24.219 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:13:23 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: [10/Mar/2026:13:13:23] ENGINE HTTP Server cherrypy._cpwsgi_server.CPWSGIServer(('::', 9283)) shut down 2026-03-10T13:13:24.219 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:13:23 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: [10/Mar/2026:13:13:23] ENGINE Bus STOPPED 2026-03-10T13:13:24.219 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:13:24 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: [10/Mar/2026:13:13:24] ENGINE Bus STARTING 2026-03-10T13:13:24.454 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:13:24 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: 2026-03-10T13:13:24.184+0000 7f3dfad49000 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member 
2026-03-10T13:13:24.454 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:13:24 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:13:24.232Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=2 err="Post \"https://192.168.123.108:8443/api/prometheus_receiver\": dial tcp 192.168.123.108:8443: connect: connection refused" 2026-03-10T13:13:24.473 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:13:24 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: [10/Mar/2026:13:13:24] ENGINE Serving on http://:::9283 2026-03-10T13:13:24.473 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:13:24 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: [10/Mar/2026:13:13:24] ENGINE Bus STARTED 2026-03-10T13:13:24.752 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:13:24 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:13:24.453Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=3 err="Post \"https://192.168.123.108:8443/api/prometheus_receiver\": dial tcp 192.168.123.108:8443: connect: connection refused" 2026-03-10T13:13:24.753 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:13:24 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:13:24.499Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=3 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": dial tcp 192.168.123.100:8443: connect: connection refused" 2026-03-10T13:13:24.753 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:13:24 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: 2026-03-10T13:13:24.608+0000 7f3dfad49000 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-10T13:13:25.183 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:24 vm00 ceph-mon[47364]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T13:13:25.184 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:24 vm00 ceph-mon[47364]: from='client.? 
' entity='client.admin' cmd='[{"prefix": "mgr fail"}]': finished 2026-03-10T13:13:25.184 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:24 vm00 ceph-mon[47364]: mgrmap e22: x(active, starting, since 0.929271s) 2026-03-10T13:13:25.184 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:24 vm00 ceph-mon[47364]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T13:13:25.184 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:24 vm00 ceph-mon[47364]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T13:13:25.184 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:24 vm00 ceph-mon[47364]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-10T13:13:25.184 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:24 vm00 ceph-mon[47364]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T13:13:25.184 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:24 vm00 ceph-mon[47364]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T13:13:25.184 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:24 vm00 ceph-mon[47364]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T13:13:25.184 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:24 vm00 ceph-mon[47364]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T13:13:25.184 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:24 vm00 ceph-mon[47364]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T13:13:25.184 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:24 vm00 ceph-mon[47364]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T13:13:25.184 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:24 vm00 ceph-mon[47364]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T13:13:25.184 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:24 vm00 ceph-mon[47364]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T13:13:25.184 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:24 vm00 ceph-mon[47364]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-10T13:13:25.184 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:24 vm00 ceph-mon[47364]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-10T13:13:25.184 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:24 vm00 ceph-mon[47364]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-10T13:13:25.184 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:24 vm00 ceph-mon[47364]: Manager daemon x is now available 2026-03-10T13:13:25.184 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:24 vm00 ceph-mon[47364]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:25.184 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:24 vm00 
ceph-mon[47364]: Queued rgw.foo for migration 2026-03-10T13:13:25.184 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:24 vm00 ceph-mon[47364]: No Migration is needed for rgw spec: {'placement': {'count': 2}, 'service_id': 'foo', 'service_name': 'rgw.foo', 'service_type': 'rgw', 'spec': {'rgw_frontend_port': 8000, 'rgw_realm': 'r', 'rgw_zone': 'z'}} 2026-03-10T13:13:25.184 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:24 vm00 ceph-mon[47364]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:25.184 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:24 vm00 ceph-mon[47364]: Migrating certs/keys for iscsi.foo spec to cert store 2026-03-10T13:13:25.184 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:24 vm00 ceph-mon[47364]: Migrating certs/keys for rgw.foo spec to cert store 2026-03-10T13:13:25.184 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:24 vm00 ceph-mon[47364]: Checking for cert/key for grafana.a 2026-03-10T13:13:25.184 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:24 vm00 ceph-mon[47364]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:25.184 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:24 vm00 ceph-mon[47364]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:13:25.184 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:24 vm00 ceph-mon[47364]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/mirror_snapshot_schedule"}]: dispatch 2026-03-10T13:13:25.184 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:24 vm00 ceph-mon[47364]: from='mgr.24680 ' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/mirror_snapshot_schedule"}]: dispatch 2026-03-10T13:13:25.184 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:24 vm00 ceph-mon[47364]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/trash_purge_schedule"}]: dispatch 2026-03-10T13:13:25.184 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:24 vm00 ceph-mon[47364]: from='mgr.24680 ' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/trash_purge_schedule"}]: dispatch 2026-03-10T13:13:25.184 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:24 vm00 ceph-mon[47364]: Deploying cephadm binary to vm08 2026-03-10T13:13:25.184 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:24 vm00 ceph-mon[47364]: Deploying cephadm binary to vm00 2026-03-10T13:13:25.184 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:13:24 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: 2026-03-10T13:13:24.981+0000 7f3dfad49000 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member 2026-03-10T13:13:25.184 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:13:25 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: 2026-03-10T13:13:25.125+0000 7f3dfad49000 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member 2026-03-10T13:13:25.184 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:13:24 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:13:24.973Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=3 err="Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP 
SANs" 2026-03-10T13:13:25.184 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:24 vm00 ceph-mon[51670]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T13:13:25.184 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:24 vm00 ceph-mon[51670]: from='client.? ' entity='client.admin' cmd='[{"prefix": "mgr fail"}]': finished 2026-03-10T13:13:25.184 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:24 vm00 ceph-mon[51670]: mgrmap e22: x(active, starting, since 0.929271s) 2026-03-10T13:13:25.184 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:24 vm00 ceph-mon[51670]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T13:13:25.184 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:24 vm00 ceph-mon[51670]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T13:13:25.184 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:24 vm00 ceph-mon[51670]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-10T13:13:25.184 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:24 vm00 ceph-mon[51670]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T13:13:25.184 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:24 vm00 ceph-mon[51670]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T13:13:25.185 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:24 vm00 ceph-mon[51670]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T13:13:25.185 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:24 vm00 ceph-mon[51670]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T13:13:25.185 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:24 vm00 ceph-mon[51670]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T13:13:25.185 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:24 vm00 ceph-mon[51670]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T13:13:25.185 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:24 vm00 ceph-mon[51670]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T13:13:25.185 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:24 vm00 ceph-mon[51670]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T13:13:25.185 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:24 vm00 ceph-mon[51670]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-10T13:13:25.185 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:24 vm00 ceph-mon[51670]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-10T13:13:25.185 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:24 vm00 ceph-mon[51670]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-10T13:13:25.185 
INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:24 vm00 ceph-mon[51670]: Manager daemon x is now available 2026-03-10T13:13:25.185 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:24 vm00 ceph-mon[51670]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:25.185 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:24 vm00 ceph-mon[51670]: Queued rgw.foo for migration 2026-03-10T13:13:25.185 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:24 vm00 ceph-mon[51670]: No Migration is needed for rgw spec: {'placement': {'count': 2}, 'service_id': 'foo', 'service_name': 'rgw.foo', 'service_type': 'rgw', 'spec': {'rgw_frontend_port': 8000, 'rgw_realm': 'r', 'rgw_zone': 'z'}} 2026-03-10T13:13:25.185 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:24 vm00 ceph-mon[51670]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:25.185 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:24 vm00 ceph-mon[51670]: Migrating certs/keys for iscsi.foo spec to cert store 2026-03-10T13:13:25.185 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:24 vm00 ceph-mon[51670]: Migrating certs/keys for rgw.foo spec to cert store 2026-03-10T13:13:25.185 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:24 vm00 ceph-mon[51670]: Checking for cert/key for grafana.a 2026-03-10T13:13:25.185 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:24 vm00 ceph-mon[51670]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:25.185 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:24 vm00 ceph-mon[51670]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:13:25.185 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:24 vm00 ceph-mon[51670]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/mirror_snapshot_schedule"}]: dispatch 2026-03-10T13:13:25.185 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:24 vm00 ceph-mon[51670]: from='mgr.24680 ' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/mirror_snapshot_schedule"}]: dispatch 2026-03-10T13:13:25.185 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:24 vm00 ceph-mon[51670]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/trash_purge_schedule"}]: dispatch 2026-03-10T13:13:25.185 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:24 vm00 ceph-mon[51670]: from='mgr.24680 ' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/trash_purge_schedule"}]: dispatch 2026-03-10T13:13:25.185 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:24 vm00 ceph-mon[51670]: Deploying cephadm binary to vm08 2026-03-10T13:13:25.185 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:24 vm00 ceph-mon[51670]: Deploying cephadm binary to vm00 2026-03-10T13:13:25.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:24 vm08 ceph-mon[49535]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T13:13:25.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:24 vm08 ceph-mon[49535]: from='client.? 
' entity='client.admin' cmd='[{"prefix": "mgr fail"}]': finished 2026-03-10T13:13:25.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:24 vm08 ceph-mon[49535]: mgrmap e22: x(active, starting, since 0.929271s) 2026-03-10T13:13:25.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:24 vm08 ceph-mon[49535]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T13:13:25.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:24 vm08 ceph-mon[49535]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T13:13:25.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:24 vm08 ceph-mon[49535]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-10T13:13:25.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:24 vm08 ceph-mon[49535]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T13:13:25.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:24 vm08 ceph-mon[49535]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T13:13:25.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:24 vm08 ceph-mon[49535]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T13:13:25.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:24 vm08 ceph-mon[49535]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T13:13:25.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:24 vm08 ceph-mon[49535]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T13:13:25.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:24 vm08 ceph-mon[49535]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T13:13:25.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:24 vm08 ceph-mon[49535]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T13:13:25.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:24 vm08 ceph-mon[49535]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T13:13:25.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:24 vm08 ceph-mon[49535]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-10T13:13:25.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:24 vm08 ceph-mon[49535]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-10T13:13:25.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:24 vm08 ceph-mon[49535]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-10T13:13:25.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:24 vm08 ceph-mon[49535]: Manager daemon x is now available 2026-03-10T13:13:25.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:24 vm08 ceph-mon[49535]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:25.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:24 vm08 
ceph-mon[49535]: Queued rgw.foo for migration 2026-03-10T13:13:25.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:24 vm08 ceph-mon[49535]: No Migration is needed for rgw spec: {'placement': {'count': 2}, 'service_id': 'foo', 'service_name': 'rgw.foo', 'service_type': 'rgw', 'spec': {'rgw_frontend_port': 8000, 'rgw_realm': 'r', 'rgw_zone': 'z'}} 2026-03-10T13:13:25.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:24 vm08 ceph-mon[49535]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:25.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:24 vm08 ceph-mon[49535]: Migrating certs/keys for iscsi.foo spec to cert store 2026-03-10T13:13:25.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:24 vm08 ceph-mon[49535]: Migrating certs/keys for rgw.foo spec to cert store 2026-03-10T13:13:25.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:24 vm08 ceph-mon[49535]: Checking for cert/key for grafana.a 2026-03-10T13:13:25.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:24 vm08 ceph-mon[49535]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:25.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:24 vm08 ceph-mon[49535]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:13:25.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:24 vm08 ceph-mon[49535]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/mirror_snapshot_schedule"}]: dispatch 2026-03-10T13:13:25.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:24 vm08 ceph-mon[49535]: from='mgr.24680 ' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/mirror_snapshot_schedule"}]: dispatch 2026-03-10T13:13:25.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:24 vm08 ceph-mon[49535]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/trash_purge_schedule"}]: dispatch 2026-03-10T13:13:25.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:24 vm08 ceph-mon[49535]: from='mgr.24680 ' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/trash_purge_schedule"}]: dispatch 2026-03-10T13:13:25.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:24 vm08 ceph-mon[49535]: Deploying cephadm binary to vm08 2026-03-10T13:13:25.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:24 vm08 ceph-mon[49535]: Deploying cephadm binary to vm00 2026-03-10T13:13:25.503 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:13:25 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: 2026-03-10T13:13:25.181+0000 7f3dfad49000 -1 mgr[py] Module influx has missing NOTIFY_TYPES member 2026-03-10T13:13:25.503 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:13:25 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: 2026-03-10T13:13:25.357+0000 7f3dfad49000 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member 2026-03-10T13:13:26.003 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:13:25 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:13:25.738Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=4 err="Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 
2026-03-10T13:13:26.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:25 vm00 ceph-mon[47364]: mgrmap e23: x(active, since 1.94426s) 2026-03-10T13:13:26.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:25 vm00 ceph-mon[47364]: pgmap v3: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:13:26.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:25 vm00 ceph-mon[47364]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:26.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:25 vm00 ceph-mon[47364]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:26.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:25 vm00 ceph-mon[47364]: [10/Mar/2026:13:13:25] ENGINE Bus STARTING 2026-03-10T13:13:26.004 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:25 vm00 ceph-mon[51670]: mgrmap e23: x(active, since 1.94426s) 2026-03-10T13:13:26.004 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:25 vm00 ceph-mon[51670]: pgmap v3: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:13:26.004 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:25 vm00 ceph-mon[51670]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:26.004 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:25 vm00 ceph-mon[51670]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:26.004 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:25 vm00 ceph-mon[51670]: [10/Mar/2026:13:13:25] ENGINE Bus STARTING 2026-03-10T13:13:26.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:25 vm08 ceph-mon[49535]: mgrmap e23: x(active, since 1.94426s) 2026-03-10T13:13:26.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:25 vm08 ceph-mon[49535]: pgmap v3: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:13:26.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:25 vm08 ceph-mon[49535]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:26.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:25 vm08 ceph-mon[49535]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:26.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:25 vm08 ceph-mon[49535]: [10/Mar/2026:13:13:25] ENGINE Bus STARTING 2026-03-10T13:13:26.314 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:13:26 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: 2026-03-10T13:13:26.027+0000 7f3dfad49000 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-10T13:13:26.314 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:13:26 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: 2026-03-10T13:13:26.250+0000 7f3dfad49000 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-10T13:13:26.599 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:13:26 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: 2026-03-10T13:13:26.312+0000 7f3dfad49000 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 2026-03-10T13:13:26.599 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:13:26 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: 2026-03-10T13:13:26.373+0000 7f3dfad49000 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-10T13:13:26.599 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:13:26 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: 2026-03-10T13:13:26.441+0000 7f3dfad49000 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 2026-03-10T13:13:26.599 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:13:26 vm00 
ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: 2026-03-10T13:13:26.505+0000 7f3dfad49000 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-10T13:13:26.771 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:13:26 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:13:26] "GET /metrics HTTP/1.1" 200 34539 "" "Prometheus/2.33.4" 2026-03-10T13:13:26.929 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:13:26 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: 2026-03-10T13:13:26.846+0000 7f3dfad49000 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member 2026-03-10T13:13:27.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:27 vm00 ceph-mon[47364]: [10/Mar/2026:13:13:25] ENGINE Serving on https://192.168.123.108:7150 2026-03-10T13:13:27.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:27 vm00 ceph-mon[47364]: [10/Mar/2026:13:13:25] ENGINE Client ('192.168.123.108', 58130) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-10T13:13:27.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:27 vm00 ceph-mon[47364]: [10/Mar/2026:13:13:25] ENGINE Serving on http://192.168.123.108:8765 2026-03-10T13:13:27.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:27 vm00 ceph-mon[47364]: [10/Mar/2026:13:13:25] ENGINE Bus STARTED 2026-03-10T13:13:27.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:27 vm00 ceph-mon[47364]: pgmap v4: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:13:27.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:27 vm00 ceph-mon[47364]: mgrmap e24: x(active, since 3s) 2026-03-10T13:13:27.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:27 vm00 ceph-mon[47364]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:27.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:27 vm00 ceph-mon[47364]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:27.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:27 vm00 ceph-mon[47364]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:27.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:27 vm00 ceph-mon[47364]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:27.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:27 vm00 ceph-mon[47364]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:27.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:27 vm00 ceph-mon[47364]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:27.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:27 vm00 ceph-mon[47364]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm08", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:13:27.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:27 vm00 ceph-mon[47364]: from='mgr.24680 ' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm08", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:13:27.253 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:13:26 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: 2026-03-10T13:13:26.927+0000 7f3dfad49000 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-10T13:13:27.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:27 vm00 ceph-mon[51670]: [10/Mar/2026:13:13:25] ENGINE Serving on https://192.168.123.108:7150 2026-03-10T13:13:27.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:27 vm00 ceph-mon[51670]: 
[10/Mar/2026:13:13:25] ENGINE Client ('192.168.123.108', 58130) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-10T13:13:27.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:27 vm00 ceph-mon[51670]: [10/Mar/2026:13:13:25] ENGINE Serving on http://192.168.123.108:8765 2026-03-10T13:13:27.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:27 vm00 ceph-mon[51670]: [10/Mar/2026:13:13:25] ENGINE Bus STARTED 2026-03-10T13:13:27.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:27 vm00 ceph-mon[51670]: pgmap v4: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:13:27.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:27 vm00 ceph-mon[51670]: mgrmap e24: x(active, since 3s) 2026-03-10T13:13:27.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:27 vm00 ceph-mon[51670]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:27.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:27 vm00 ceph-mon[51670]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:27.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:27 vm00 ceph-mon[51670]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:27.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:27 vm00 ceph-mon[51670]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:27.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:27 vm00 ceph-mon[51670]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:27.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:27 vm00 ceph-mon[51670]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:27.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:27 vm00 ceph-mon[51670]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm08", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:13:27.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:27 vm00 ceph-mon[51670]: from='mgr.24680 ' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm08", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:13:27.264 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:27 vm08 ceph-mon[49535]: [10/Mar/2026:13:13:25] ENGINE Serving on https://192.168.123.108:7150 2026-03-10T13:13:27.264 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:27 vm08 ceph-mon[49535]: [10/Mar/2026:13:13:25] ENGINE Client ('192.168.123.108', 58130) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-10T13:13:27.264 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:27 vm08 ceph-mon[49535]: [10/Mar/2026:13:13:25] ENGINE Serving on http://192.168.123.108:8765 2026-03-10T13:13:27.264 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:27 vm08 ceph-mon[49535]: [10/Mar/2026:13:13:25] ENGINE Bus STARTED 2026-03-10T13:13:27.264 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:27 vm08 ceph-mon[49535]: pgmap v4: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:13:27.265 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:27 vm08 ceph-mon[49535]: mgrmap e24: x(active, since 3s) 2026-03-10T13:13:27.265 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:27 vm08 ceph-mon[49535]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:27.265 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:27 vm08 ceph-mon[49535]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:27.265 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 
13:13:27 vm08 ceph-mon[49535]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:27.265 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:27 vm08 ceph-mon[49535]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:27.265 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:27 vm08 ceph-mon[49535]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:27.265 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:27 vm08 ceph-mon[49535]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:27.265 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:27 vm08 ceph-mon[49535]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm08", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:13:27.265 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:27 vm08 ceph-mon[49535]: from='mgr.24680 ' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm08", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:13:27.866 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:13:27 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: 2026-03-10T13:13:27.566+0000 7f3dfad49000 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-10T13:13:27.866 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:13:27 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: 2026-03-10T13:13:27.642+0000 7f3dfad49000 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member 2026-03-10T13:13:27.866 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:13:27 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: 2026-03-10T13:13:27.709+0000 7f3dfad49000 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-10T13:13:28.140 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:13:27 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: 2026-03-10T13:13:27.864+0000 7f3dfad49000 -1 mgr[py] Module status has missing NOTIFY_TYPES member 2026-03-10T13:13:28.140 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:13:27 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: 2026-03-10T13:13:27.940+0000 7f3dfad49000 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 2026-03-10T13:13:28.140 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:13:28 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: 2026-03-10T13:13:28.045+0000 7f3dfad49000 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member 2026-03-10T13:13:28.455 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:28 vm00 ceph-mon[47364]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:28.455 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:28 vm00 ceph-mon[47364]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:28.455 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:28 vm00 ceph-mon[47364]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:13:28.455 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:28 vm00 ceph-mon[47364]: from='mgr.24680 ' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:13:28.455 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:28 vm00 ceph-mon[47364]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:13:28.455 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:28 vm00 ceph-mon[47364]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "auth get", 
"entity": "client.admin"}]: dispatch 2026-03-10T13:13:28.455 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:28 vm00 ceph-mon[47364]: Updating vm00:/etc/ceph/ceph.conf 2026-03-10T13:13:28.455 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:28 vm00 ceph-mon[47364]: Updating vm08:/etc/ceph/ceph.conf 2026-03-10T13:13:28.455 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:28 vm00 ceph-mon[47364]: Updating vm00:/var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/config/ceph.conf 2026-03-10T13:13:28.455 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:28 vm00 ceph-mon[47364]: Updating vm08:/var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/config/ceph.conf 2026-03-10T13:13:28.455 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:28 vm00 ceph-mon[47364]: Updating vm00:/etc/ceph/ceph.client.admin.keyring 2026-03-10T13:13:28.455 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:28 vm00 ceph-mon[47364]: Updating vm08:/etc/ceph/ceph.client.admin.keyring 2026-03-10T13:13:28.455 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:28 vm00 ceph-mon[47364]: Updating vm00:/var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/config/ceph.client.admin.keyring 2026-03-10T13:13:28.455 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:28 vm00 ceph-mon[47364]: Updating vm08:/var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/config/ceph.client.admin.keyring 2026-03-10T13:13:28.455 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:28 vm00 ceph-mon[47364]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:28.455 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:28 vm00 ceph-mon[47364]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:28.455 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:28 vm00 ceph-mon[47364]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:28.455 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:28 vm00 ceph-mon[47364]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:28.455 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:28 vm00 ceph-mon[47364]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:28.455 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:28 vm00 ceph-mon[47364]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:28.455 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:28 vm00 ceph-mon[47364]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:28.455 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:28 vm00 ceph-mon[47364]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:28.455 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:28 vm00 ceph-mon[47364]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:28.455 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:28 vm00 ceph-mon[47364]: mgrmap e25: x(active, since 5s) 2026-03-10T13:13:28.455 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:13:28 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: 2026-03-10T13:13:28.159+0000 7f3dfad49000 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member 2026-03-10T13:13:28.455 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:13:28 vm00 systemd[1]: Stopping Ceph node-exporter.a for 98a3dada-1c81-11f1-89c9-d57c120f78d5... 
2026-03-10T13:13:28.455 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:13:28 vm00 podman[78342]: 2026-03-10 13:13:28.374131907 +0000 UTC m=+0.027243405 container died 439a263972f093b39f7400664b8d35f7a329fe52988248d38b33d3afa6cf6629 (image=quay.io/prometheus/node-exporter:v1.3.1, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a, maintainer=The Prometheus Authors ) 2026-03-10T13:13:28.455 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:13:28 vm00 podman[78342]: 2026-03-10 13:13:28.393679966 +0000 UTC m=+0.046791454 container remove 439a263972f093b39f7400664b8d35f7a329fe52988248d38b33d3afa6cf6629 (image=quay.io/prometheus/node-exporter:v1.3.1, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a, maintainer=The Prometheus Authors ) 2026-03-10T13:13:28.455 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:13:28 vm00 bash[78342]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a 2026-03-10T13:13:28.455 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:13:28 vm00 systemd[1]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@node-exporter.a.service: Main process exited, code=exited, status=143/n/a 2026-03-10T13:13:28.455 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:28 vm00 ceph-mon[51670]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:28.455 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:28 vm00 ceph-mon[51670]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:28.455 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:28 vm00 ceph-mon[51670]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:13:28.455 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:28 vm00 ceph-mon[51670]: from='mgr.24680 ' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:13:28.455 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:28 vm00 ceph-mon[51670]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:13:28.455 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:28 vm00 ceph-mon[51670]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:13:28.456 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:28 vm00 ceph-mon[51670]: Updating vm00:/etc/ceph/ceph.conf 2026-03-10T13:13:28.456 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:28 vm00 ceph-mon[51670]: Updating vm08:/etc/ceph/ceph.conf 2026-03-10T13:13:28.456 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:28 vm00 ceph-mon[51670]: Updating vm00:/var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/config/ceph.conf 2026-03-10T13:13:28.456 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:28 vm00 ceph-mon[51670]: Updating vm08:/var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/config/ceph.conf 2026-03-10T13:13:28.456 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:28 vm00 ceph-mon[51670]: Updating vm00:/etc/ceph/ceph.client.admin.keyring 2026-03-10T13:13:28.456 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:28 vm00 ceph-mon[51670]: Updating vm08:/etc/ceph/ceph.client.admin.keyring 2026-03-10T13:13:28.456 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:28 vm00 ceph-mon[51670]: Updating vm00:/var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/config/ceph.client.admin.keyring 2026-03-10T13:13:28.456 
INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:28 vm00 ceph-mon[51670]: Updating vm08:/var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/config/ceph.client.admin.keyring 2026-03-10T13:13:28.456 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:28 vm00 ceph-mon[51670]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:28.456 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:28 vm00 ceph-mon[51670]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:28.456 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:28 vm00 ceph-mon[51670]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:28.456 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:28 vm00 ceph-mon[51670]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:28.456 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:28 vm00 ceph-mon[51670]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:28.456 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:28 vm00 ceph-mon[51670]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:28.456 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:28 vm00 ceph-mon[51670]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:28.456 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:28 vm00 ceph-mon[51670]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:28.456 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:28 vm00 ceph-mon[51670]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:28.456 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:28 vm00 ceph-mon[51670]: mgrmap e25: x(active, since 5s) 2026-03-10T13:13:28.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:28 vm08 ceph-mon[49535]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:28.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:28 vm08 ceph-mon[49535]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:28.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:28 vm08 ceph-mon[49535]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:13:28.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:28 vm08 ceph-mon[49535]: from='mgr.24680 ' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:13:28.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:28 vm08 ceph-mon[49535]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:13:28.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:28 vm08 ceph-mon[49535]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:13:28.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:28 vm08 ceph-mon[49535]: Updating vm00:/etc/ceph/ceph.conf 2026-03-10T13:13:28.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:28 vm08 ceph-mon[49535]: Updating vm08:/etc/ceph/ceph.conf 2026-03-10T13:13:28.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:28 vm08 ceph-mon[49535]: Updating vm00:/var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/config/ceph.conf 2026-03-10T13:13:28.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:28 vm08 ceph-mon[49535]: Updating vm08:/var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/config/ceph.conf 2026-03-10T13:13:28.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:28 vm08 ceph-mon[49535]: Updating vm00:/etc/ceph/ceph.client.admin.keyring 2026-03-10T13:13:28.521 
INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:28 vm08 ceph-mon[49535]: Updating vm08:/etc/ceph/ceph.client.admin.keyring 2026-03-10T13:13:28.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:28 vm08 ceph-mon[49535]: Updating vm00:/var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/config/ceph.client.admin.keyring 2026-03-10T13:13:28.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:28 vm08 ceph-mon[49535]: Updating vm08:/var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/config/ceph.client.admin.keyring 2026-03-10T13:13:28.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:28 vm08 ceph-mon[49535]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:28.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:28 vm08 ceph-mon[49535]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:28.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:28 vm08 ceph-mon[49535]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:28.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:28 vm08 ceph-mon[49535]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:28.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:28 vm08 ceph-mon[49535]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:28.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:28 vm08 ceph-mon[49535]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:28.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:28 vm08 ceph-mon[49535]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:28.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:28 vm08 ceph-mon[49535]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:28.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:28 vm08 ceph-mon[49535]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:28.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:28 vm08 ceph-mon[49535]: mgrmap e25: x(active, since 5s) 2026-03-10T13:13:28.723 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:13:28 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: 2026-03-10T13:13:28.480+0000 7f3dfad49000 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-10T13:13:28.723 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:13:28 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: 2026-03-10T13:13:28.565+0000 7f3dfad49000 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-10T13:13:28.723 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:13:28 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: [10/Mar/2026:13:13:28] ENGINE Bus STARTING 2026-03-10T13:13:28.723 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:13:28 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: CherryPy Checker: 2026-03-10T13:13:28.723 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:13:28 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: The Application mounted at '' has an empty config. 
2026-03-10T13:13:28.723 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:13:28 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: 2026-03-10T13:13:28.723 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:13:28 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: [10/Mar/2026:13:13:28] ENGINE Serving on http://:::9283 2026-03-10T13:13:28.723 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:13:28 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: [10/Mar/2026:13:13:28] ENGINE Bus STARTED 2026-03-10T13:13:28.723 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:13:28 vm00 systemd[1]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@node-exporter.a.service: Failed with result 'exit-code'. 2026-03-10T13:13:28.723 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:13:28 vm00 systemd[1]: Stopped Ceph node-exporter.a for 98a3dada-1c81-11f1-89c9-d57c120f78d5. 2026-03-10T13:13:28.723 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:13:28 vm00 systemd[1]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@node-exporter.a.service: Consumed 1.335s CPU time. 2026-03-10T13:13:28.723 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:13:28 vm00 systemd[1]: Starting Ceph node-exporter.a for 98a3dada-1c81-11f1-89c9-d57c120f78d5... 2026-03-10T13:13:29.003 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:13:28 vm00 bash[78482]: Trying to pull quay.io/prometheus/node-exporter:v1.7.0... 2026-03-10T13:13:29.020 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:13:28 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[66905]: ts=2026-03-10T13:13:28.658Z caller=manager.go:609 level=warn component="rule manager" group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on(ceph_daemon) group_left(hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: |\n OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked down and back up at {{ $value | humanize }} times once a minute for 5 minutes. This could indicate a network issue (latency, packet drop, disruption) on the clusters \"cluster network\". 
Check the network environment on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSD's to flap (mark each other out)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"192.168.123.108:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"192.168.123.100:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T13:13:29.020 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:13:28 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[66905]: ts=2026-03-10T13:13:28.658Z caller=manager.go:609 level=warn component="rule manager" group=osd msg="Evaluating rule failed" rule="alert: CephPGImbalance\nexpr: abs(((ceph_osd_numpg > 0) - on(job) group_left() avg by(job) (ceph_osd_numpg\n > 0)) / on(job) group_left() avg by(job) (ceph_osd_numpg > 0)) * on(ceph_daemon)\n group_left(hostname) ceph_osd_metadata > 0.3\nfor: 5m\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.5\n severity: warning\n type: ceph_default\nannotations:\n description: |\n OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} deviates by more than 30% from average PG count.\n summary: PG allocations are not balanced across devices\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"192.168.123.108:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"192.168.123.100:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T13:13:29.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:29 vm00 ceph-mon[47364]: Reconfiguring node-exporter.a (dependencies changed)... 2026-03-10T13:13:29.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:29 vm00 ceph-mon[47364]: Deploying daemon node-exporter.a on vm00 2026-03-10T13:13:29.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:29 vm00 ceph-mon[47364]: pgmap v5: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:13:29.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:29 vm00 ceph-mon[47364]: Standby manager daemon y started 2026-03-10T13:13:29.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:29 vm00 ceph-mon[47364]: from='mgr.? 
192.168.123.100:0/3235775856' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/crt"}]: dispatch 2026-03-10T13:13:29.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:29 vm00 ceph-mon[47364]: from='mgr.? 192.168.123.100:0/3235775856' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-10T13:13:29.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:29 vm00 ceph-mon[47364]: from='mgr.? 192.168.123.100:0/3235775856' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/key"}]: dispatch 2026-03-10T13:13:29.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:29 vm00 ceph-mon[47364]: from='mgr.? 192.168.123.100:0/3235775856' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-10T13:13:29.503 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:13:29 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:13:29.255Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=6 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:13:29.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:29 vm00 ceph-mon[51670]: Reconfiguring node-exporter.a (dependencies changed)... 2026-03-10T13:13:29.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:29 vm00 ceph-mon[51670]: Deploying daemon node-exporter.a on vm00 2026-03-10T13:13:29.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:29 vm00 ceph-mon[51670]: pgmap v5: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:13:29.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:29 vm00 ceph-mon[51670]: Standby manager daemon y started 2026-03-10T13:13:29.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:29 vm00 ceph-mon[51670]: from='mgr.? 192.168.123.100:0/3235775856' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/crt"}]: dispatch 2026-03-10T13:13:29.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:29 vm00 ceph-mon[51670]: from='mgr.? 192.168.123.100:0/3235775856' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-10T13:13:29.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:29 vm00 ceph-mon[51670]: from='mgr.? 192.168.123.100:0/3235775856' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/key"}]: dispatch 2026-03-10T13:13:29.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:29 vm00 ceph-mon[51670]: from='mgr.? 192.168.123.100:0/3235775856' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-10T13:13:29.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:29 vm08 ceph-mon[49535]: Reconfiguring node-exporter.a (dependencies changed)... 
2026-03-10T13:13:29.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:29 vm08 ceph-mon[49535]: Deploying daemon node-exporter.a on vm00 2026-03-10T13:13:29.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:29 vm08 ceph-mon[49535]: pgmap v5: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:13:29.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:29 vm08 ceph-mon[49535]: Standby manager daemon y started 2026-03-10T13:13:29.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:29 vm08 ceph-mon[49535]: from='mgr.? 192.168.123.100:0/3235775856' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/crt"}]: dispatch 2026-03-10T13:13:29.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:29 vm08 ceph-mon[49535]: from='mgr.? 192.168.123.100:0/3235775856' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-10T13:13:29.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:29 vm08 ceph-mon[49535]: from='mgr.? 192.168.123.100:0/3235775856' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/key"}]: dispatch 2026-03-10T13:13:29.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:29 vm08 ceph-mon[49535]: from='mgr.? 192.168.123.100:0/3235775856' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-10T13:13:30.252 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:13:29 vm00 bash[78482]: Getting image source signatures 2026-03-10T13:13:30.252 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:13:29 vm00 bash[78482]: Copying blob sha256:324153f2810a9927fcce320af9e4e291e0b6e805cbdd1f338386c756b9defa24 2026-03-10T13:13:30.252 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:13:29 vm00 bash[78482]: Copying blob sha256:2abcce694348cd2c949c0e98a7400ebdfd8341021bcf6b541bc72033ce982510 2026-03-10T13:13:30.253 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:13:29 vm00 bash[78482]: Copying blob sha256:455fd88e5221bc1e278ef2d059cd70e4df99a24e5af050ede621534276f6cf9a 2026-03-10T13:13:30.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:30 vm08 ceph-mon[49535]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-10T13:13:30.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:30 vm08 ceph-mon[49535]: mgrmap e26: x(active, since 6s), standbys: y 2026-03-10T13:13:30.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:30 vm08 ceph-mon[49535]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:13:30.683 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:30 vm00 ceph-mon[47364]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-10T13:13:30.683 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:30 vm00 ceph-mon[47364]: mgrmap e26: x(active, since 6s), standbys: y 2026-03-10T13:13:30.683 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:30 vm00 ceph-mon[47364]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:13:30.683 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:30 vm00 ceph-mon[51670]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-10T13:13:30.684 
INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:30 vm00 ceph-mon[51670]: mgrmap e26: x(active, since 6s), standbys: y 2026-03-10T13:13:30.684 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:30 vm00 ceph-mon[51670]: from='client.14736 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:13:31.004 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:13:30 vm00 bash[78482]: Copying config sha256:72c9c208898624938c9e4183d6686ea4a5fd3f912bc29bc3f00147924c521a3e 2026-03-10T13:13:31.004 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:13:30 vm00 bash[78482]: Writing manifest to image destination 2026-03-10T13:13:31.004 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:13:30 vm00 podman[78482]: 2026-03-10 13:13:30.69682242 +0000 UTC m=+1.982861743 container create bcf8834016191e2ea6ad1604235b1fe114e314c779dc2dd6ccb2b81c4a7bb9d7 (image=quay.io/prometheus/node-exporter:v1.7.0, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a, maintainer=The Prometheus Authors ) 2026-03-10T13:13:31.004 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:13:30 vm00 podman[78482]: 2026-03-10 13:13:30.72422259 +0000 UTC m=+2.010261922 container init bcf8834016191e2ea6ad1604235b1fe114e314c779dc2dd6ccb2b81c4a7bb9d7 (image=quay.io/prometheus/node-exporter:v1.7.0, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a, maintainer=The Prometheus Authors ) 2026-03-10T13:13:31.004 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:13:30 vm00 podman[78482]: 2026-03-10 13:13:30.72674929 +0000 UTC m=+2.012788613 container start bcf8834016191e2ea6ad1604235b1fe114e314c779dc2dd6ccb2b81c4a7bb9d7 (image=quay.io/prometheus/node-exporter:v1.7.0, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a, maintainer=The Prometheus Authors ) 2026-03-10T13:13:31.004 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:13:30 vm00 bash[78482]: bcf8834016191e2ea6ad1604235b1fe114e314c779dc2dd6ccb2b81c4a7bb9d7 2026-03-10T13:13:31.004 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:13:30 vm00 podman[78482]: 2026-03-10 13:13:30.691410428 +0000 UTC m=+1.977449751 image pull 72c9c208898624938c9e4183d6686ea4a5fd3f912bc29bc3f00147924c521a3e quay.io/prometheus/node-exporter:v1.7.0 2026-03-10T13:13:31.004 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:13:30 vm00 systemd[1]: Started Ceph node-exporter.a for 98a3dada-1c81-11f1-89c9-d57c120f78d5. 
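Note on the node-exporter.a redeploy above: once the new container reports its listening address (the entries that follow show [::]:9100 with TLS disabled), a quick probe of the metrics endpoint confirms it is serving. A minimal sketch; the host and port are taken from the log, and node_exporter_build_info is the exporter's own build-info metric, used here only as a sanity check.

#!/usr/bin/env python3
"""Quick liveness probe for a redeployed node-exporter (host/port taken from the log)."""
import urllib.request

TARGETS = ["http://vm00:9100/metrics"]   # vm00:9100 as reported in the entries below; extend as needed

for url in TARGETS:
    try:
        with urllib.request.urlopen(url, timeout=5) as resp:
            status = resp.status
            lines = resp.read().decode().splitlines()
        build_info = [l for l in lines if l.startswith("node_exporter_build_info")]
        print(f"{url}: HTTP {status}, {len(lines)} metric lines, "
              f"{build_info[0] if build_info else 'no build_info line found'}")
    except OSError as exc:
        print(f"{url}: unreachable ({exc})")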
2026-03-10T13:13:31.005 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:13:30 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[78538]: ts=2026-03-10T13:13:30.739Z caller=node_exporter.go:192 level=info msg="Starting node_exporter" version="(version=1.7.0, branch=HEAD, revision=7333465abf9efba81876303bb57e6fadb946041b)" 2026-03-10T13:13:31.005 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:13:30 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[78538]: ts=2026-03-10T13:13:30.739Z caller=node_exporter.go:193 level=info msg="Build context" build_context="(go=go1.21.4, platform=linux/amd64, user=root@35918982f6d8, date=20231112-23:53:35, tags=netgo osusergo static_build)" 2026-03-10T13:13:31.005 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:13:30 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[78538]: ts=2026-03-10T13:13:30.740Z caller=filesystem_common.go:111 level=info collector=filesystem msg="Parsed flag --collector.filesystem.mount-points-exclude" flag=^/(dev|proc|run/credentials/.+|sys|var/lib/docker/.+|var/lib/containers/storage/.+)($|/) 2026-03-10T13:13:31.005 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:13:30 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[78538]: ts=2026-03-10T13:13:30.740Z caller=filesystem_common.go:113 level=info collector=filesystem msg="Parsed flag --collector.filesystem.fs-types-exclude" flag=^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$ 2026-03-10T13:13:31.005 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:13:30 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[78538]: ts=2026-03-10T13:13:30.740Z caller=diskstats_common.go:111 level=info collector=diskstats msg="Parsed flag --collector.diskstats.device-exclude" flag=^(ram|loop|fd|(h|s|v|xv)d[a-z]|nvme\d+n\d+p)\d+$ 2026-03-10T13:13:31.005 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:13:30 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[78538]: ts=2026-03-10T13:13:30.740Z caller=diskstats_linux.go:265 level=error collector=diskstats msg="Failed to open directory, disabling udev device properties" path=/run/udev/data 2026-03-10T13:13:31.005 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:13:30 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[78538]: ts=2026-03-10T13:13:30.740Z caller=node_exporter.go:110 level=info msg="Enabled collectors" 2026-03-10T13:13:31.005 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:13:30 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[78538]: ts=2026-03-10T13:13:30.740Z caller=node_exporter.go:117 level=info collector=arp 2026-03-10T13:13:31.005 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:13:30 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[78538]: ts=2026-03-10T13:13:30.740Z caller=node_exporter.go:117 level=info collector=bcache 2026-03-10T13:13:31.005 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:13:30 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[78538]: ts=2026-03-10T13:13:30.740Z caller=node_exporter.go:117 level=info collector=bonding 2026-03-10T13:13:31.005 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:13:30 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[78538]: ts=2026-03-10T13:13:30.740Z caller=node_exporter.go:117 level=info 
collector=btrfs 2026-03-10T13:13:31.005 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:13:30 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[78538]: ts=2026-03-10T13:13:30.740Z caller=node_exporter.go:117 level=info collector=conntrack 2026-03-10T13:13:31.005 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:13:30 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[78538]: ts=2026-03-10T13:13:30.740Z caller=node_exporter.go:117 level=info collector=cpu 2026-03-10T13:13:31.005 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:13:30 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[78538]: ts=2026-03-10T13:13:30.740Z caller=node_exporter.go:117 level=info collector=cpufreq 2026-03-10T13:13:31.005 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:13:30 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[78538]: ts=2026-03-10T13:13:30.740Z caller=node_exporter.go:117 level=info collector=diskstats 2026-03-10T13:13:31.005 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:13:30 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[78538]: ts=2026-03-10T13:13:30.740Z caller=node_exporter.go:117 level=info collector=dmi 2026-03-10T13:13:31.005 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:13:30 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[78538]: ts=2026-03-10T13:13:30.740Z caller=node_exporter.go:117 level=info collector=edac 2026-03-10T13:13:31.005 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:13:30 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[78538]: ts=2026-03-10T13:13:30.740Z caller=node_exporter.go:117 level=info collector=entropy 2026-03-10T13:13:31.005 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:13:30 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[78538]: ts=2026-03-10T13:13:30.740Z caller=node_exporter.go:117 level=info collector=fibrechannel 2026-03-10T13:13:31.005 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:13:30 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[78538]: ts=2026-03-10T13:13:30.740Z caller=node_exporter.go:117 level=info collector=filefd 2026-03-10T13:13:31.005 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:13:30 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[78538]: ts=2026-03-10T13:13:30.740Z caller=node_exporter.go:117 level=info collector=filesystem 2026-03-10T13:13:31.005 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:13:30 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[78538]: ts=2026-03-10T13:13:30.740Z caller=node_exporter.go:117 level=info collector=hwmon 2026-03-10T13:13:31.005 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:13:30 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[78538]: ts=2026-03-10T13:13:30.740Z caller=node_exporter.go:117 level=info collector=infiniband 2026-03-10T13:13:31.005 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:13:30 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[78538]: ts=2026-03-10T13:13:30.740Z caller=node_exporter.go:117 level=info collector=ipvs 2026-03-10T13:13:31.005 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:13:30 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[78538]: ts=2026-03-10T13:13:30.740Z caller=node_exporter.go:117 level=info collector=loadavg 2026-03-10T13:13:31.005 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:13:30 vm00 
ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[78538]: ts=2026-03-10T13:13:30.740Z caller=node_exporter.go:117 level=info collector=mdadm 2026-03-10T13:13:31.005 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:13:30 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[78538]: ts=2026-03-10T13:13:30.740Z caller=node_exporter.go:117 level=info collector=meminfo 2026-03-10T13:13:31.005 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:13:30 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[78538]: ts=2026-03-10T13:13:30.740Z caller=node_exporter.go:117 level=info collector=netclass 2026-03-10T13:13:31.005 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:13:30 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[78538]: ts=2026-03-10T13:13:30.740Z caller=node_exporter.go:117 level=info collector=netdev 2026-03-10T13:13:31.005 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:13:30 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[78538]: ts=2026-03-10T13:13:30.740Z caller=node_exporter.go:117 level=info collector=netstat 2026-03-10T13:13:31.005 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:13:30 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[78538]: ts=2026-03-10T13:13:30.740Z caller=node_exporter.go:117 level=info collector=nfs 2026-03-10T13:13:31.005 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:13:30 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[78538]: ts=2026-03-10T13:13:30.740Z caller=node_exporter.go:117 level=info collector=nfsd 2026-03-10T13:13:31.005 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:13:30 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[78538]: ts=2026-03-10T13:13:30.740Z caller=node_exporter.go:117 level=info collector=nvme 2026-03-10T13:13:31.005 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:13:30 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[78538]: ts=2026-03-10T13:13:30.740Z caller=node_exporter.go:117 level=info collector=os 2026-03-10T13:13:31.005 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:13:30 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[78538]: ts=2026-03-10T13:13:30.740Z caller=node_exporter.go:117 level=info collector=powersupplyclass 2026-03-10T13:13:31.005 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:13:30 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[78538]: ts=2026-03-10T13:13:30.740Z caller=node_exporter.go:117 level=info collector=pressure 2026-03-10T13:13:31.005 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:13:30 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[78538]: ts=2026-03-10T13:13:30.740Z caller=node_exporter.go:117 level=info collector=rapl 2026-03-10T13:13:31.005 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:13:30 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[78538]: ts=2026-03-10T13:13:30.740Z caller=node_exporter.go:117 level=info collector=schedstat 2026-03-10T13:13:31.005 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:13:30 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[78538]: ts=2026-03-10T13:13:30.740Z caller=node_exporter.go:117 level=info collector=selinux 2026-03-10T13:13:31.005 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:13:30 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[78538]: ts=2026-03-10T13:13:30.740Z caller=node_exporter.go:117 level=info 
collector=sockstat 2026-03-10T13:13:31.005 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:13:30 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[78538]: ts=2026-03-10T13:13:30.740Z caller=node_exporter.go:117 level=info collector=softnet 2026-03-10T13:13:31.005 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:13:30 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[78538]: ts=2026-03-10T13:13:30.740Z caller=node_exporter.go:117 level=info collector=stat 2026-03-10T13:13:31.005 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:13:30 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[78538]: ts=2026-03-10T13:13:30.740Z caller=node_exporter.go:117 level=info collector=tapestats 2026-03-10T13:13:31.005 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:13:30 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[78538]: ts=2026-03-10T13:13:30.740Z caller=node_exporter.go:117 level=info collector=textfile 2026-03-10T13:13:31.005 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:13:30 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[78538]: ts=2026-03-10T13:13:30.740Z caller=node_exporter.go:117 level=info collector=thermal_zone 2026-03-10T13:13:31.005 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:13:30 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[78538]: ts=2026-03-10T13:13:30.740Z caller=node_exporter.go:117 level=info collector=time 2026-03-10T13:13:31.005 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:13:30 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[78538]: ts=2026-03-10T13:13:30.740Z caller=node_exporter.go:117 level=info collector=udp_queues 2026-03-10T13:13:31.005 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:13:30 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[78538]: ts=2026-03-10T13:13:30.740Z caller=node_exporter.go:117 level=info collector=uname 2026-03-10T13:13:31.005 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:13:30 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[78538]: ts=2026-03-10T13:13:30.740Z caller=node_exporter.go:117 level=info collector=vmstat 2026-03-10T13:13:31.005 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:13:30 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[78538]: ts=2026-03-10T13:13:30.740Z caller=node_exporter.go:117 level=info collector=xfs 2026-03-10T13:13:31.005 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:13:30 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[78538]: ts=2026-03-10T13:13:30.740Z caller=node_exporter.go:117 level=info collector=zfs 2026-03-10T13:13:31.005 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:13:30 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[78538]: ts=2026-03-10T13:13:30.741Z caller=tls_config.go:274 level=info msg="Listening on" address=[::]:9100 2026-03-10T13:13:31.005 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:13:30 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a[78538]: ts=2026-03-10T13:13:30.741Z caller=tls_config.go:277 level=info msg="TLS is disabled." 
http2=false address=[::]:9100 2026-03-10T13:13:31.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:31 vm08 ceph-mon[49535]: pgmap v6: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:13:31.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:31 vm08 ceph-mon[49535]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:31.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:31 vm08 ceph-mon[49535]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:31.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:31 vm00 ceph-mon[47364]: pgmap v6: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:13:31.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:31 vm00 ceph-mon[47364]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:31.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:31 vm00 ceph-mon[47364]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:31.753 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:13:31 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:13:31.301Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=6 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:13:31.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:31 vm00 ceph-mon[51670]: pgmap v6: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:13:31.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:31 vm00 ceph-mon[51670]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:31.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:31 vm00 ceph-mon[51670]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:32.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:32 vm08 ceph-mon[49535]: Reconfiguring alertmanager.a (dependencies changed)... 2026-03-10T13:13:32.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:32 vm08 ceph-mon[49535]: Deploying daemon alertmanager.a on vm00 2026-03-10T13:13:32.534 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:32 vm00 ceph-mon[47364]: Reconfiguring alertmanager.a (dependencies changed)... 2026-03-10T13:13:32.535 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:32 vm00 ceph-mon[47364]: Deploying daemon alertmanager.a on vm00 2026-03-10T13:13:32.535 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:32 vm00 ceph-mon[51670]: Reconfiguring alertmanager.a (dependencies changed)... 
2026-03-10T13:13:32.535 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:32 vm00 ceph-mon[51670]: Deploying daemon alertmanager.a on vm00 2026-03-10T13:13:33.253 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:13:33 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[47576]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:13:33] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-10T13:13:33.517 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:33 vm00 ceph-mon[47364]: pgmap v7: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 26 KiB/s rd, 0 B/s wr, 11 op/s 2026-03-10T13:13:33.518 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:33 vm00 ceph-mon[51670]: pgmap v7: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 26 KiB/s rd, 0 B/s wr, 11 op/s 2026-03-10T13:13:33.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:33 vm08 ceph-mon[49535]: pgmap v7: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 26 KiB/s rd, 0 B/s wr, 11 op/s 2026-03-10T13:13:33.769 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:13:33 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=error ts=2026-03-10T13:13:33.529Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:13:33.769 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:13:33 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=error ts=2026-03-10T13:13:33.529Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:13:33.769 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:13:33 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:13:33.531Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:13:33.769 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:13:33 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:13:33.531Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 
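Note on the repeated Alertmanager webhook failures above ("cannot validate certificate ... doesn't contain any IP SANs"): the dashboard endpoints are addressed by bare IP (192.168.123.100:8443 and 192.168.123.108:8443), but the served certificate carries no IP-address SAN entries, so every verifying TLS client rejects it. A minimal sketch that reproduces the same verification step with the Python standard library; the endpoints are taken from the log, everything else is illustrative.

#!/usr/bin/env python3
"""Reproduce the certificate-verification failure reported by Alertmanager.

When a server certificate contains no IP-address SANs, connecting to it by IP
with a verifying TLS context fails exactly like the webhook posts above.
"""
import socket
import ssl

ENDPOINTS = [("192.168.123.100", 8443), ("192.168.123.108", 8443)]  # dashboard URLs from the log

context = ssl.create_default_context()   # verifies the chain and the hostname/IP

for host, port in ENDPOINTS:
    try:
        with socket.create_connection((host, port), timeout=5) as sock:
            with context.wrap_socket(sock, server_hostname=host) as tls:
                print(f"{host}:{port}: verified OK as {tls.getpeercert()['subject']}")
    except ssl.SSLCertVerificationError as exc:
        # Expected here: the certificate cannot be validated for a bare IP without IP SANs.
        print(f"{host}:{port}: verification failed: {exc.verify_message}")
    except OSError as exc:
        print(f"{host}:{port}: connection error: {exc}")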
2026-03-10T13:13:33.769 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:13:33 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:13:33.532Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.108:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.108 because it doesn't contain any IP SANs" 2026-03-10T13:13:33.769 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:13:33 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=warn ts=2026-03-10T13:13:33.533Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.100:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.100 because it doesn't contain any IP SANs" 2026-03-10T13:13:33.769 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:13:33 vm00 systemd[1]: Stopping Ceph alertmanager.a for 98a3dada-1c81-11f1-89c9-d57c120f78d5... 2026-03-10T13:13:33.769 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:13:33 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[73057]: level=info ts=2026-03-10T13:13:33.744Z caller=main.go:557 msg="Received SIGTERM, exiting gracefully..." 2026-03-10T13:13:33.769 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:13:33 vm00 podman[78798]: 2026-03-10 13:13:33.754764458 +0000 UTC m=+0.023163917 container died da91a70a93ac7f97f86bee4205d1a93a3007359e4a56018a04f1f652c57338c0 (image=quay.io/prometheus/alertmanager:v0.23.0, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a, maintainer=The Prometheus Authors ) 2026-03-10T13:13:34.088 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:13:33 vm00 podman[78798]: 2026-03-10 13:13:33.773242653 +0000 UTC m=+0.041642123 container remove da91a70a93ac7f97f86bee4205d1a93a3007359e4a56018a04f1f652c57338c0 (image=quay.io/prometheus/alertmanager:v0.23.0, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a, maintainer=The Prometheus Authors ) 2026-03-10T13:13:34.088 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:13:33 vm00 podman[78798]: 2026-03-10 13:13:33.77411683 +0000 UTC m=+0.042516289 volume remove c202a28c8467f574183faa5822471a8c63f60aab7ef1afab68e9fd2b2d6e7ec0 2026-03-10T13:13:34.088 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:13:33 vm00 bash[78798]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a 2026-03-10T13:13:34.088 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:13:33 vm00 systemd[1]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@alertmanager.a.service: Deactivated successfully. 2026-03-10T13:13:34.088 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:13:33 vm00 systemd[1]: Stopped Ceph alertmanager.a for 98a3dada-1c81-11f1-89c9-d57c120f78d5. 2026-03-10T13:13:34.088 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:13:33 vm00 systemd[1]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@alertmanager.a.service: Consumed 1.033s CPU time. 2026-03-10T13:13:34.088 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:13:34 vm00 systemd[1]: Starting Ceph alertmanager.a for 98a3dada-1c81-11f1-89c9-d57c120f78d5... 
2026-03-10T13:13:34.347 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:13:34 vm00 podman[78910]: 2026-03-10 13:13:34.087120127 +0000 UTC m=+0.015141630 volume create a1974002b31b7238b1363445cdca72ed4a4069cb926b431b6dd700dd9d26e8e0 2026-03-10T13:13:34.347 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:13:34 vm00 podman[78910]: 2026-03-10 13:13:34.090218158 +0000 UTC m=+0.018239650 container create 12fde3cf83cba9d0a6f9479be80ea8d92beb092408b37b172a4c5a573bffc836 (image=quay.io/prometheus/alertmanager:v0.25.0, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a, maintainer=The Prometheus Authors ) 2026-03-10T13:13:34.347 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:13:34 vm00 podman[78910]: 2026-03-10 13:13:34.114383701 +0000 UTC m=+0.042405204 container init 12fde3cf83cba9d0a6f9479be80ea8d92beb092408b37b172a4c5a573bffc836 (image=quay.io/prometheus/alertmanager:v0.25.0, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a, maintainer=The Prometheus Authors ) 2026-03-10T13:13:34.347 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:13:34 vm00 podman[78910]: 2026-03-10 13:13:34.116699536 +0000 UTC m=+0.044721039 container start 12fde3cf83cba9d0a6f9479be80ea8d92beb092408b37b172a4c5a573bffc836 (image=quay.io/prometheus/alertmanager:v0.25.0, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a, maintainer=The Prometheus Authors ) 2026-03-10T13:13:34.347 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:13:34 vm00 bash[78910]: 12fde3cf83cba9d0a6f9479be80ea8d92beb092408b37b172a4c5a573bffc836 2026-03-10T13:13:34.347 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:13:34 vm00 podman[78910]: 2026-03-10 13:13:34.08206848 +0000 UTC m=+0.010089993 image pull c8568f914cd25b2062c44e9f79f9c18da6e3b85fe0c47a12a2191c61426c2b19 quay.io/prometheus/alertmanager:v0.25.0 2026-03-10T13:13:34.347 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:13:34 vm00 systemd[1]: Started Ceph alertmanager.a for 98a3dada-1c81-11f1-89c9-d57c120f78d5. 2026-03-10T13:13:34.347 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:13:34 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:13:34.135Z caller=main.go:240 level=info msg="Starting Alertmanager" version="(version=0.25.0, branch=HEAD, revision=258fab7cdd551f2cf251ed0348f0ad7289aee789)" 2026-03-10T13:13:34.347 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:13:34 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:13:34.135Z caller=main.go:241 level=info build_context="(go=go1.19.4, user=root@abe866dd5717, date=20221222-14:51:36)" 2026-03-10T13:13:34.347 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:13:34 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:13:34.136Z caller=cluster.go:185 level=info component=cluster msg="setting advertise address explicitly" addr=192.168.123.100 port=9094 2026-03-10T13:13:34.347 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:13:34 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:13:34.137Z caller=cluster.go:681 level=info component=cluster msg="Waiting for gossip to settle..." 
interval=2s 2026-03-10T13:13:34.347 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:13:34 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:13:34.159Z caller=coordinator.go:113 level=info component=configuration msg="Loading configuration file" file=/etc/alertmanager/alertmanager.yml 2026-03-10T13:13:34.347 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:13:34 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:13:34.159Z caller=coordinator.go:126 level=info component=configuration msg="Completed loading of configuration file" file=/etc/alertmanager/alertmanager.yml 2026-03-10T13:13:34.347 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:13:34 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:13:34.162Z caller=tls_config.go:232 level=info msg="Listening on" address=[::]:9093 2026-03-10T13:13:34.347 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:13:34 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:13:34.162Z caller=tls_config.go:235 level=info msg="TLS is disabled." http2=false address=[::]:9093 2026-03-10T13:13:35.232 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:35 vm08 ceph-mon[49535]: pgmap v8: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 20 KiB/s rd, 0 B/s wr, 8 op/s 2026-03-10T13:13:35.232 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:35 vm08 ceph-mon[49535]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:35.233 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:35 vm08 ceph-mon[49535]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:35.233 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:35 vm08 ceph-mon[49535]: Reconfiguring iscsi.foo.vm00.dezodo (dependencies changed)... 2026-03-10T13:13:35.233 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:35 vm08 ceph-mon[49535]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dezodo", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-10T13:13:35.233 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:35 vm08 ceph-mon[49535]: from='mgr.24680 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dezodo", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-10T13:13:35.233 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:35 vm08 ceph-mon[49535]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:13:35.233 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:35 vm08 ceph-mon[49535]: Reconfiguring daemon iscsi.foo.vm00.dezodo on vm00 2026-03-10T13:13:35.233 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:35 vm08 ceph-mon[49535]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:35.233 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:35 vm08 ceph-mon[49535]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:35.233 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:13:35 vm08 systemd[1]: Stopping Ceph node-exporter.b for 98a3dada-1c81-11f1-89c9-d57c120f78d5... 
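Note on the Alertmanager restart above: the new v0.25.0 container loaded /etc/alertmanager/alertmanager.yml and is listening on [::]:9093 with TLS disabled. A small status probe against its v2 API can confirm the version, loaded config, and cluster peers; this is a sketch assuming the host from the log and the standard Alertmanager API path.

#!/usr/bin/env python3
"""Check the redeployed Alertmanager via its v2 status API (host/port from the log)."""
import json
import urllib.request

URL = "http://vm00:9093/api/v2/status"   # Alertmanager listens on 9093 with TLS disabled (see above)

with urllib.request.urlopen(URL, timeout=5) as resp:
    status = json.load(resp)

print("version :", status.get("versionInfo", {}).get("version"))
print("uptime  :", status.get("uptime"))
print("peers   :", len(status.get("cluster", {}).get("peers", [])))
print("config  :", "loaded" if status.get("config", {}).get("original") else "missing")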
2026-03-10T13:13:35.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:35 vm00 ceph-mon[47364]: pgmap v8: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 20 KiB/s rd, 0 B/s wr, 8 op/s 2026-03-10T13:13:35.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:35 vm00 ceph-mon[47364]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:35.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:35 vm00 ceph-mon[47364]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:35.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:35 vm00 ceph-mon[47364]: Reconfiguring iscsi.foo.vm00.dezodo (dependencies changed)... 2026-03-10T13:13:35.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:35 vm00 ceph-mon[47364]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dezodo", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-10T13:13:35.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:35 vm00 ceph-mon[47364]: from='mgr.24680 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dezodo", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-10T13:13:35.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:35 vm00 ceph-mon[47364]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:13:35.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:35 vm00 ceph-mon[47364]: Reconfiguring daemon iscsi.foo.vm00.dezodo on vm00 2026-03-10T13:13:35.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:35 vm00 ceph-mon[47364]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:35.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:35 vm00 ceph-mon[47364]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:35.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:35 vm00 ceph-mon[51670]: pgmap v8: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 20 KiB/s rd, 0 B/s wr, 8 op/s 2026-03-10T13:13:35.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:35 vm00 ceph-mon[51670]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:35.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:35 vm00 ceph-mon[51670]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:35.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:35 vm00 ceph-mon[51670]: Reconfiguring iscsi.foo.vm00.dezodo (dependencies changed)... 
2026-03-10T13:13:35.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:35 vm00 ceph-mon[51670]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dezodo", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-10T13:13:35.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:35 vm00 ceph-mon[51670]: from='mgr.24680 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dezodo", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-10T13:13:35.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:35 vm00 ceph-mon[51670]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:13:35.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:35 vm00 ceph-mon[51670]: Reconfiguring daemon iscsi.foo.vm00.dezodo on vm00 2026-03-10T13:13:35.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:35 vm00 ceph-mon[51670]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:35.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:35 vm00 ceph-mon[51670]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:35.520 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:13:35 vm08 podman[70741]: 2026-03-10 13:13:35.231358134 +0000 UTC m=+0.025557415 container died d5ba7ccd220b2d9fddc9e021137534a1e6cf2e404e9fefd74db924d1cc76345a (image=quay.io/prometheus/node-exporter:v1.3.1, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b, maintainer=The Prometheus Authors ) 2026-03-10T13:13:35.520 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:13:35 vm08 podman[70741]: 2026-03-10 13:13:35.249535123 +0000 UTC m=+0.043734415 container remove d5ba7ccd220b2d9fddc9e021137534a1e6cf2e404e9fefd74db924d1cc76345a (image=quay.io/prometheus/node-exporter:v1.3.1, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b, maintainer=The Prometheus Authors ) 2026-03-10T13:13:35.520 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:13:35 vm08 bash[70741]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b 2026-03-10T13:13:35.520 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:13:35 vm08 systemd[1]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@node-exporter.b.service: Main process exited, code=exited, status=143/n/a 2026-03-10T13:13:35.520 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:13:35 vm08 systemd[1]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@node-exporter.b.service: Failed with result 'exit-code'. 2026-03-10T13:13:35.520 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:13:35 vm08 systemd[1]: Stopped Ceph node-exporter.b for 98a3dada-1c81-11f1-89c9-d57c120f78d5. 2026-03-10T13:13:35.521 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:13:35 vm08 systemd[1]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@node-exporter.b.service: Consumed 1.312s CPU time. 2026-03-10T13:13:35.521 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:13:35 vm08 systemd[1]: Starting Ceph node-exporter.b for 98a3dada-1c81-11f1-89c9-d57c120f78d5... 
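Note on the node-exporter.b restart above: the container that just stopped was running quay.io/prometheus/node-exporter:v1.3.1, and the entries that follow pull and start v1.7.0, mirroring the earlier node-exporter.a redeploy and the alertmanager.a move from v0.23.0 to v0.25.0. To see at a glance which image and version each cephadm-managed daemon is currently running, `ceph orch ps --format json` can be summarised as below; a sketch assuming it runs on a host with the admin keyring, and the JSON field names may vary slightly by release.

#!/usr/bin/env python3
"""Summarise which container image each cephadm-managed daemon is running.

Run on a host with the admin keyring; parses `ceph orch ps --format json`,
which reports an image name and version per daemon.
"""
import json
import subprocess
from collections import Counter

def orch_ps() -> list:
    out = subprocess.run(
        ["ceph", "orch", "ps", "--format", "json"],
        check=True, capture_output=True, text=True,
    ).stdout
    return json.loads(out)

def main() -> None:
    by_type_version = Counter()
    for d in orch_ps():
        name = d.get("daemon_name") or f"{d.get('daemon_type')}.{d.get('daemon_id')}"
        image = d.get("container_image_name", "?")
        version = d.get("version", "?")
        print(f"{name:<32} {version:<14} {image}")
        by_type_version[(d.get("daemon_type"), version)] += 1
    print("\nper daemon type/version:", dict(by_type_version))

if __name__ == "__main__":
    main()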
2026-03-10T13:13:36.020 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:13:35 vm08 bash[70851]: Trying to pull quay.io/prometheus/node-exporter:v1.7.0... 2026-03-10T13:13:36.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:36 vm00 ceph-mon[47364]: Reconfiguring node-exporter.b (dependencies changed)... 2026-03-10T13:13:36.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:36 vm00 ceph-mon[47364]: Deploying daemon node-exporter.b on vm08 2026-03-10T13:13:36.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:36 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/3044075266' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-10T13:13:36.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:36 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/1642032562' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/1017778394"}]: dispatch 2026-03-10T13:13:36.503 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:13:36 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:13:36.138Z caller=cluster.go:706 level=info component=cluster msg="gossip not settled" polls=0 before=0 now=1 elapsed=2.000414064s 2026-03-10T13:13:36.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:36 vm00 ceph-mon[51670]: Reconfiguring node-exporter.b (dependencies changed)... 2026-03-10T13:13:36.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:36 vm00 ceph-mon[51670]: Deploying daemon node-exporter.b on vm08 2026-03-10T13:13:36.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:36 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/3044075266' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-10T13:13:36.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:36 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/1642032562' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/1017778394"}]: dispatch 2026-03-10T13:13:36.733 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:13:36 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:13:36] "GET /metrics HTTP/1.1" 200 34539 "" "Prometheus/2.33.4" 2026-03-10T13:13:36.733 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:36 vm08 ceph-mon[49535]: Reconfiguring node-exporter.b (dependencies changed)... 2026-03-10T13:13:36.733 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:36 vm08 ceph-mon[49535]: Deploying daemon node-exporter.b on vm08 2026-03-10T13:13:36.733 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:36 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/3044075266' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-10T13:13:36.733 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:36 vm08 ceph-mon[49535]: from='client.? 
192.168.123.100:0/1642032562' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/1017778394"}]: dispatch 2026-03-10T13:13:37.020 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:13:36 vm08 bash[70851]: Getting image source signatures 2026-03-10T13:13:37.020 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:13:36 vm08 bash[70851]: Copying blob sha256:324153f2810a9927fcce320af9e4e291e0b6e805cbdd1f338386c756b9defa24 2026-03-10T13:13:37.020 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:13:36 vm08 bash[70851]: Copying blob sha256:2abcce694348cd2c949c0e98a7400ebdfd8341021bcf6b541bc72033ce982510 2026-03-10T13:13:37.020 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:13:36 vm08 bash[70851]: Copying blob sha256:455fd88e5221bc1e278ef2d059cd70e4df99a24e5af050ede621534276f6cf9a 2026-03-10T13:13:37.540 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:13:37 vm08 bash[70851]: Copying config sha256:72c9c208898624938c9e4183d6686ea4a5fd3f912bc29bc3f00147924c521a3e 2026-03-10T13:13:37.540 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:13:37 vm08 bash[70851]: Writing manifest to image destination 2026-03-10T13:13:37.540 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:13:37 vm08 podman[70851]: 2026-03-10 13:13:37.506773438 +0000 UTC m=+1.965075145 container create 4ac83f03f8180efcc27acff61b1ff929b556230bc7f90fd9f7e2ad964eb064d2 (image=quay.io/prometheus/node-exporter:v1.7.0, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b, maintainer=The Prometheus Authors ) 2026-03-10T13:13:37.540 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:13:37 vm08 podman[70851]: 2026-03-10 13:13:37.527227669 +0000 UTC m=+1.985529388 container init 4ac83f03f8180efcc27acff61b1ff929b556230bc7f90fd9f7e2ad964eb064d2 (image=quay.io/prometheus/node-exporter:v1.7.0, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b, maintainer=The Prometheus Authors ) 2026-03-10T13:13:37.540 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:13:37 vm08 podman[70851]: 2026-03-10 13:13:37.529292855 +0000 UTC m=+1.987594562 container start 4ac83f03f8180efcc27acff61b1ff929b556230bc7f90fd9f7e2ad964eb064d2 (image=quay.io/prometheus/node-exporter:v1.7.0, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b, maintainer=The Prometheus Authors ) 2026-03-10T13:13:37.540 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:13:37 vm08 bash[70851]: 4ac83f03f8180efcc27acff61b1ff929b556230bc7f90fd9f7e2ad964eb064d2 2026-03-10T13:13:37.540 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:13:37 vm08 podman[70851]: 2026-03-10 13:13:37.500135471 +0000 UTC m=+1.958437198 image pull 72c9c208898624938c9e4183d6686ea4a5fd3f912bc29bc3f00147924c521a3e quay.io/prometheus/node-exporter:v1.7.0 2026-03-10T13:13:37.540 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:13:37 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[70906]: ts=2026-03-10T13:13:37.532Z caller=node_exporter.go:192 level=info msg="Starting node_exporter" version="(version=1.7.0, branch=HEAD, revision=7333465abf9efba81876303bb57e6fadb946041b)" 2026-03-10T13:13:37.540 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:13:37 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[70906]: ts=2026-03-10T13:13:37.532Z caller=node_exporter.go:193 level=info msg="Build context" build_context="(go=go1.21.4, platform=linux/amd64, user=root@35918982f6d8, date=20231112-23:53:35, 
tags=netgo osusergo static_build)" 2026-03-10T13:13:37.540 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:13:37 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[70906]: ts=2026-03-10T13:13:37.534Z caller=diskstats_common.go:111 level=info collector=diskstats msg="Parsed flag --collector.diskstats.device-exclude" flag=^(ram|loop|fd|(h|s|v|xv)d[a-z]|nvme\d+n\d+p)\d+$ 2026-03-10T13:13:37.540 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:13:37 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[70906]: ts=2026-03-10T13:13:37.534Z caller=diskstats_linux.go:265 level=error collector=diskstats msg="Failed to open directory, disabling udev device properties" path=/run/udev/data 2026-03-10T13:13:37.540 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:13:37 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[70906]: ts=2026-03-10T13:13:37.535Z caller=filesystem_common.go:111 level=info collector=filesystem msg="Parsed flag --collector.filesystem.mount-points-exclude" flag=^/(dev|proc|run/credentials/.+|sys|var/lib/docker/.+|var/lib/containers/storage/.+)($|/) 2026-03-10T13:13:37.540 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:13:37 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[70906]: ts=2026-03-10T13:13:37.535Z caller=filesystem_common.go:113 level=info collector=filesystem msg="Parsed flag --collector.filesystem.fs-types-exclude" flag=^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$ 2026-03-10T13:13:37.540 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:13:37 vm08 systemd[1]: Started Ceph node-exporter.b for 98a3dada-1c81-11f1-89c9-d57c120f78d5. 
2026-03-10T13:13:37.540 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:13:37 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[70906]: ts=2026-03-10T13:13:37.535Z caller=node_exporter.go:110 level=info msg="Enabled collectors" 2026-03-10T13:13:37.540 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:13:37 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[70906]: ts=2026-03-10T13:13:37.535Z caller=node_exporter.go:117 level=info collector=arp 2026-03-10T13:13:37.540 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:13:37 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[70906]: ts=2026-03-10T13:13:37.535Z caller=node_exporter.go:117 level=info collector=bcache 2026-03-10T13:13:37.540 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:13:37 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[70906]: ts=2026-03-10T13:13:37.535Z caller=node_exporter.go:117 level=info collector=bonding 2026-03-10T13:13:37.540 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:13:37 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[70906]: ts=2026-03-10T13:13:37.535Z caller=node_exporter.go:117 level=info collector=btrfs 2026-03-10T13:13:37.540 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:13:37 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[70906]: ts=2026-03-10T13:13:37.535Z caller=node_exporter.go:117 level=info collector=conntrack 2026-03-10T13:13:37.540 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:13:37 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[70906]: ts=2026-03-10T13:13:37.535Z caller=node_exporter.go:117 level=info collector=cpu 2026-03-10T13:13:37.540 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:13:37 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[70906]: ts=2026-03-10T13:13:37.535Z caller=node_exporter.go:117 level=info collector=cpufreq 2026-03-10T13:13:37.540 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:13:37 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[70906]: ts=2026-03-10T13:13:37.535Z caller=node_exporter.go:117 level=info collector=diskstats 2026-03-10T13:13:37.540 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:13:37 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[70906]: ts=2026-03-10T13:13:37.535Z caller=node_exporter.go:117 level=info collector=dmi 2026-03-10T13:13:37.540 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:13:37 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[70906]: ts=2026-03-10T13:13:37.535Z caller=node_exporter.go:117 level=info collector=edac 2026-03-10T13:13:37.540 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:13:37 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[70906]: ts=2026-03-10T13:13:37.535Z caller=node_exporter.go:117 level=info collector=entropy 2026-03-10T13:13:37.541 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:13:37 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[70906]: ts=2026-03-10T13:13:37.535Z caller=node_exporter.go:117 level=info collector=fibrechannel 2026-03-10T13:13:37.541 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:13:37 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[70906]: ts=2026-03-10T13:13:37.535Z caller=node_exporter.go:117 level=info collector=filefd 2026-03-10T13:13:37.541 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:13:37 vm08 
ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[70906]: ts=2026-03-10T13:13:37.535Z caller=node_exporter.go:117 level=info collector=filesystem 2026-03-10T13:13:37.541 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:13:37 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[70906]: ts=2026-03-10T13:13:37.535Z caller=node_exporter.go:117 level=info collector=hwmon 2026-03-10T13:13:37.541 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:13:37 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[70906]: ts=2026-03-10T13:13:37.535Z caller=node_exporter.go:117 level=info collector=infiniband 2026-03-10T13:13:37.541 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:13:37 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[70906]: ts=2026-03-10T13:13:37.535Z caller=node_exporter.go:117 level=info collector=ipvs 2026-03-10T13:13:37.541 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:13:37 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[70906]: ts=2026-03-10T13:13:37.535Z caller=node_exporter.go:117 level=info collector=loadavg 2026-03-10T13:13:37.541 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:13:37 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[70906]: ts=2026-03-10T13:13:37.535Z caller=node_exporter.go:117 level=info collector=mdadm 2026-03-10T13:13:37.541 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:13:37 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[70906]: ts=2026-03-10T13:13:37.535Z caller=node_exporter.go:117 level=info collector=meminfo 2026-03-10T13:13:37.541 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:13:37 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[70906]: ts=2026-03-10T13:13:37.535Z caller=node_exporter.go:117 level=info collector=netclass 2026-03-10T13:13:37.541 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:13:37 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[70906]: ts=2026-03-10T13:13:37.535Z caller=node_exporter.go:117 level=info collector=netdev 2026-03-10T13:13:37.541 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:13:37 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[70906]: ts=2026-03-10T13:13:37.535Z caller=node_exporter.go:117 level=info collector=netstat 2026-03-10T13:13:37.541 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:13:37 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[70906]: ts=2026-03-10T13:13:37.535Z caller=node_exporter.go:117 level=info collector=nfs 2026-03-10T13:13:37.541 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:13:37 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[70906]: ts=2026-03-10T13:13:37.535Z caller=node_exporter.go:117 level=info collector=nfsd 2026-03-10T13:13:37.541 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:13:37 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[70906]: ts=2026-03-10T13:13:37.535Z caller=node_exporter.go:117 level=info collector=nvme 2026-03-10T13:13:37.541 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:13:37 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[70906]: ts=2026-03-10T13:13:37.535Z caller=node_exporter.go:117 level=info collector=os 2026-03-10T13:13:37.541 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:13:37 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[70906]: ts=2026-03-10T13:13:37.535Z caller=node_exporter.go:117 level=info 
collector=powersupplyclass 2026-03-10T13:13:37.541 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:13:37 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[70906]: ts=2026-03-10T13:13:37.535Z caller=node_exporter.go:117 level=info collector=pressure 2026-03-10T13:13:37.541 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:13:37 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[70906]: ts=2026-03-10T13:13:37.536Z caller=node_exporter.go:117 level=info collector=rapl 2026-03-10T13:13:37.541 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:13:37 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[70906]: ts=2026-03-10T13:13:37.536Z caller=node_exporter.go:117 level=info collector=schedstat 2026-03-10T13:13:37.541 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:13:37 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[70906]: ts=2026-03-10T13:13:37.536Z caller=node_exporter.go:117 level=info collector=selinux 2026-03-10T13:13:37.541 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:13:37 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[70906]: ts=2026-03-10T13:13:37.536Z caller=node_exporter.go:117 level=info collector=sockstat 2026-03-10T13:13:37.541 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:13:37 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[70906]: ts=2026-03-10T13:13:37.536Z caller=node_exporter.go:117 level=info collector=softnet 2026-03-10T13:13:37.541 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:13:37 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[70906]: ts=2026-03-10T13:13:37.536Z caller=node_exporter.go:117 level=info collector=stat 2026-03-10T13:13:37.541 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:13:37 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[70906]: ts=2026-03-10T13:13:37.536Z caller=node_exporter.go:117 level=info collector=tapestats 2026-03-10T13:13:37.541 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:13:37 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[70906]: ts=2026-03-10T13:13:37.536Z caller=node_exporter.go:117 level=info collector=textfile 2026-03-10T13:13:37.541 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:13:37 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[70906]: ts=2026-03-10T13:13:37.536Z caller=node_exporter.go:117 level=info collector=thermal_zone 2026-03-10T13:13:37.541 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:13:37 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[70906]: ts=2026-03-10T13:13:37.536Z caller=node_exporter.go:117 level=info collector=time 2026-03-10T13:13:37.541 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:13:37 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[70906]: ts=2026-03-10T13:13:37.536Z caller=node_exporter.go:117 level=info collector=udp_queues 2026-03-10T13:13:37.541 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:13:37 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[70906]: ts=2026-03-10T13:13:37.536Z caller=node_exporter.go:117 level=info collector=uname 2026-03-10T13:13:37.541 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:13:37 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[70906]: ts=2026-03-10T13:13:37.536Z caller=node_exporter.go:117 level=info collector=vmstat 2026-03-10T13:13:37.541 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:13:37 vm08 
ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[70906]: ts=2026-03-10T13:13:37.536Z caller=node_exporter.go:117 level=info collector=xfs 2026-03-10T13:13:37.541 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:13:37 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[70906]: ts=2026-03-10T13:13:37.536Z caller=node_exporter.go:117 level=info collector=zfs 2026-03-10T13:13:37.543 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:37 vm08 ceph-mon[49535]: pgmap v9: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 17 KiB/s rd, 0 B/s wr, 7 op/s 2026-03-10T13:13:37.543 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:37 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/1642032562' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/1017778394"}]': finished 2026-03-10T13:13:37.543 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:37 vm08 ceph-mon[49535]: osdmap e79: 8 total, 8 up, 8 in 2026-03-10T13:13:37.543 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:37 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/2733691083' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/1017778394"}]: dispatch 2026-03-10T13:13:37.543 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:37 vm08 ceph-mon[49535]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/1017778394"}]: dispatch 2026-03-10T13:13:37.548 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:13:37 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[70906]: ts=2026-03-10T13:13:37.536Z caller=tls_config.go:274 level=info msg="Listening on" address=[::]:9100 2026-03-10T13:13:37.548 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:13:37 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b[70906]: ts=2026-03-10T13:13:37.536Z caller=tls_config.go:277 level=info msg="TLS is disabled." http2=false address=[::]:9100 2026-03-10T13:13:37.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:37 vm00 ceph-mon[47364]: pgmap v9: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 17 KiB/s rd, 0 B/s wr, 7 op/s 2026-03-10T13:13:37.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:37 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/1642032562' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/1017778394"}]': finished 2026-03-10T13:13:37.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:37 vm00 ceph-mon[47364]: osdmap e79: 8 total, 8 up, 8 in 2026-03-10T13:13:37.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:37 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/2733691083' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/1017778394"}]: dispatch 2026-03-10T13:13:37.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:37 vm00 ceph-mon[47364]: from='client.? 
' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/1017778394"}]: dispatch 2026-03-10T13:13:37.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:37 vm00 ceph-mon[51670]: pgmap v9: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 17 KiB/s rd, 0 B/s wr, 7 op/s 2026-03-10T13:13:37.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:37 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/1642032562' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/1017778394"}]': finished 2026-03-10T13:13:37.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:37 vm00 ceph-mon[51670]: osdmap e79: 8 total, 8 up, 8 in 2026-03-10T13:13:37.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:37 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/2733691083' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/1017778394"}]: dispatch 2026-03-10T13:13:37.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:37 vm00 ceph-mon[51670]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/1017778394"}]: dispatch 2026-03-10T13:13:38.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:38 vm08 ceph-mon[49535]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/1017778394"}]': finished 2026-03-10T13:13:38.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:38 vm08 ceph-mon[49535]: osdmap e80: 8 total, 8 up, 8 in 2026-03-10T13:13:38.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:38 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/3465303451' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/438761138"}]: dispatch 2026-03-10T13:13:38.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:38 vm08 ceph-mon[49535]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/438761138"}]: dispatch 2026-03-10T13:13:38.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:38 vm08 ceph-mon[49535]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:38.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:38 vm08 ceph-mon[49535]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:38.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:38 vm08 ceph-mon[49535]: Reconfiguring prometheus.a (dependencies changed)... 2026-03-10T13:13:38.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:38 vm00 ceph-mon[47364]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/1017778394"}]': finished 2026-03-10T13:13:38.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:38 vm00 ceph-mon[47364]: osdmap e80: 8 total, 8 up, 8 in 2026-03-10T13:13:38.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:38 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/3465303451' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/438761138"}]: dispatch 2026-03-10T13:13:38.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:38 vm00 ceph-mon[47364]: from='client.? 
' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/438761138"}]: dispatch 2026-03-10T13:13:38.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:38 vm00 ceph-mon[47364]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:38.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:38 vm00 ceph-mon[47364]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:38.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:38 vm00 ceph-mon[47364]: Reconfiguring prometheus.a (dependencies changed)... 2026-03-10T13:13:38.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:38 vm00 ceph-mon[51670]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/1017778394"}]': finished 2026-03-10T13:13:38.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:38 vm00 ceph-mon[51670]: osdmap e80: 8 total, 8 up, 8 in 2026-03-10T13:13:38.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:38 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/3465303451' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/438761138"}]: dispatch 2026-03-10T13:13:38.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:38 vm00 ceph-mon[51670]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/438761138"}]: dispatch 2026-03-10T13:13:38.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:38 vm00 ceph-mon[51670]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:38.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:38 vm00 ceph-mon[51670]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:38.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:38 vm00 ceph-mon[51670]: Reconfiguring prometheus.a (dependencies changed)... 2026-03-10T13:13:39.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:39 vm00 ceph-mon[47364]: Deploying daemon prometheus.a on vm08 2026-03-10T13:13:39.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:39 vm00 ceph-mon[47364]: pgmap v12: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 23 KiB/s rd, 0 B/s wr, 10 op/s 2026-03-10T13:13:39.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:39 vm00 ceph-mon[47364]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/438761138"}]': finished 2026-03-10T13:13:39.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:39 vm00 ceph-mon[47364]: osdmap e81: 8 total, 8 up, 8 in 2026-03-10T13:13:39.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:39 vm00 ceph-mon[47364]: from='client.? 
192.168.123.100:0/3973805684' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/3581815720"}]: dispatch 2026-03-10T13:13:39.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:39 vm00 ceph-mon[47364]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:13:39.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:39 vm00 ceph-mon[51670]: Deploying daemon prometheus.a on vm08 2026-03-10T13:13:39.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:39 vm00 ceph-mon[51670]: pgmap v12: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 23 KiB/s rd, 0 B/s wr, 10 op/s 2026-03-10T13:13:39.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:39 vm00 ceph-mon[51670]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/438761138"}]': finished 2026-03-10T13:13:39.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:39 vm00 ceph-mon[51670]: osdmap e81: 8 total, 8 up, 8 in 2026-03-10T13:13:39.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:39 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/3973805684' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/3581815720"}]: dispatch 2026-03-10T13:13:39.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:39 vm00 ceph-mon[51670]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:13:39.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:39 vm08 ceph-mon[49535]: Deploying daemon prometheus.a on vm08 2026-03-10T13:13:39.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:39 vm08 ceph-mon[49535]: pgmap v12: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 23 KiB/s rd, 0 B/s wr, 10 op/s 2026-03-10T13:13:39.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:39 vm08 ceph-mon[49535]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/438761138"}]': finished 2026-03-10T13:13:39.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:39 vm08 ceph-mon[49535]: osdmap e81: 8 total, 8 up, 8 in 2026-03-10T13:13:39.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:39 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/3973805684' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/3581815720"}]: dispatch 2026-03-10T13:13:39.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:39 vm08 ceph-mon[49535]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:13:40.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:40 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/3973805684' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/3581815720"}]': finished 2026-03-10T13:13:40.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:40 vm00 ceph-mon[47364]: osdmap e82: 8 total, 8 up, 8 in 2026-03-10T13:13:40.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:40 vm00 ceph-mon[47364]: from='client.? 
192.168.123.100:0/522722918' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/3644663112"}]: dispatch 2026-03-10T13:13:40.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:40 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/3973805684' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/3581815720"}]': finished 2026-03-10T13:13:40.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:40 vm00 ceph-mon[51670]: osdmap e82: 8 total, 8 up, 8 in 2026-03-10T13:13:40.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:40 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/522722918' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/3644663112"}]: dispatch 2026-03-10T13:13:40.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:40 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/3973805684' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/3581815720"}]': finished 2026-03-10T13:13:40.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:40 vm08 ceph-mon[49535]: osdmap e82: 8 total, 8 up, 8 in 2026-03-10T13:13:40.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:40 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/522722918' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/3644663112"}]: dispatch 2026-03-10T13:13:41.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:41 vm00 ceph-mon[47364]: pgmap v15: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:13:41.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:41 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/522722918' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/3644663112"}]': finished 2026-03-10T13:13:41.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:41 vm00 ceph-mon[47364]: osdmap e83: 8 total, 8 up, 8 in 2026-03-10T13:13:41.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:41 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/888188467' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2380814906"}]: dispatch 2026-03-10T13:13:41.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:41 vm00 ceph-mon[47364]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2380814906"}]: dispatch 2026-03-10T13:13:41.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:41 vm00 ceph-mon[51670]: pgmap v15: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:13:41.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:41 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/522722918' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/3644663112"}]': finished 2026-03-10T13:13:41.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:41 vm00 ceph-mon[51670]: osdmap e83: 8 total, 8 up, 8 in 2026-03-10T13:13:41.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:41 vm00 ceph-mon[51670]: from='client.? 
192.168.123.100:0/888188467' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2380814906"}]: dispatch 2026-03-10T13:13:41.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:41 vm00 ceph-mon[51670]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2380814906"}]: dispatch 2026-03-10T13:13:41.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:41 vm08 ceph-mon[49535]: pgmap v15: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:13:41.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:41 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/522722918' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/3644663112"}]': finished 2026-03-10T13:13:41.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:41 vm08 ceph-mon[49535]: osdmap e83: 8 total, 8 up, 8 in 2026-03-10T13:13:41.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:41 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/888188467' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2380814906"}]: dispatch 2026-03-10T13:13:41.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:41 vm08 ceph-mon[49535]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2380814906"}]: dispatch 2026-03-10T13:13:42.580 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:42 vm08 ceph-mon[49535]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2380814906"}]': finished 2026-03-10T13:13:42.581 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:42 vm08 ceph-mon[49535]: osdmap e84: 8 total, 8 up, 8 in 2026-03-10T13:13:42.581 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:42 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/2503346112' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1924038199"}]: dispatch 2026-03-10T13:13:42.581 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:42 vm08 ceph-mon[49535]: from='client.? 
' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1924038199"}]: dispatch 2026-03-10T13:13:42.581 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:13:42 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[66905]: ts=2026-03-10T13:13:42.314Z caller=manager.go:609 level=warn component="rule manager" group=pools msg="Evaluating rule failed" rule="alert: CephPoolGrowthWarning\nexpr: (predict_linear(ceph_pool_percent_used[2d], 3600 * 24 * 5) * on(pool_id) group_right()\n ceph_pool_metadata) >= 95\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.9.2\n severity: warning\n type: ceph_default\nannotations:\n description: |\n Pool '{{ $labels.name }}' will be full in less than 5 days assuming the average fill-up rate of the past 48 hours.\n summary: Pool growth rate may soon exceed it's capacity\n" err="found duplicate series for the match group {pool_id=\"1\"} on the left hand-side of the operation: [{instance=\"192.168.123.108:9283\", job=\"ceph\", pool_id=\"1\"}, {instance=\"192.168.123.100:9283\", job=\"ceph\", pool_id=\"1\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T13:13:42.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:42 vm00 ceph-mon[47364]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2380814906"}]': finished 2026-03-10T13:13:42.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:42 vm00 ceph-mon[47364]: osdmap e84: 8 total, 8 up, 8 in 2026-03-10T13:13:42.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:42 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/2503346112' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1924038199"}]: dispatch 2026-03-10T13:13:42.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:42 vm00 ceph-mon[47364]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1924038199"}]: dispatch 2026-03-10T13:13:42.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:42 vm00 ceph-mon[51670]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2380814906"}]': finished 2026-03-10T13:13:42.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:42 vm00 ceph-mon[51670]: osdmap e84: 8 total, 8 up, 8 in 2026-03-10T13:13:42.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:42 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/2503346112' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1924038199"}]: dispatch 2026-03-10T13:13:42.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:42 vm00 ceph-mon[51670]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1924038199"}]: dispatch 2026-03-10T13:13:42.831 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:13:42 vm08 systemd[1]: Stopping Ceph prometheus.a for 98a3dada-1c81-11f1-89c9-d57c120f78d5... 2026-03-10T13:13:42.831 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:13:42 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[66905]: ts=2026-03-10T13:13:42.802Z caller=main.go:775 level=warn msg="Received SIGTERM, exiting gracefully..." 
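The CephPoolGrowthWarning evaluation failure above is a many-to-many match: two mgr exporter endpoints (192.168.123.100:9283 and 192.168.123.108:9283) both expose ceph_pool_metadata for pool_id="1", so the on(pool_id) join in the rule sees duplicate series. A minimal sketch of reproducing that collision against the Prometheus HTTP API, assuming vm08.local:9095 (host name assumed; the port is taken from the listener lines further down) is reachable from the test node:
    # sketch: list the ceph_pool_metadata series that collide on pool_id="1"
    curl -sG http://vm08.local:9095/api/v1/query \
      --data-urlencode 'query=ceph_pool_metadata{pool_id="1"}' | jq '.data.result[].metric'
Two results, one per mgr exporter instance, would match the "matching labels must be unique on one side" condition reported by the rule manager.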
2026-03-10T13:13:42.831 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:13:42 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[66905]: ts=2026-03-10T13:13:42.803Z caller=main.go:798 level=info msg="Stopping scrape discovery manager..." 2026-03-10T13:13:42.831 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:13:42 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[66905]: ts=2026-03-10T13:13:42.803Z caller=main.go:812 level=info msg="Stopping notify discovery manager..." 2026-03-10T13:13:42.831 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:13:42 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[66905]: ts=2026-03-10T13:13:42.803Z caller=main.go:834 level=info msg="Stopping scrape manager..." 2026-03-10T13:13:42.831 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:13:42 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[66905]: ts=2026-03-10T13:13:42.803Z caller=main.go:794 level=info msg="Scrape discovery manager stopped" 2026-03-10T13:13:42.831 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:13:42 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[66905]: ts=2026-03-10T13:13:42.803Z caller=main.go:808 level=info msg="Notify discovery manager stopped" 2026-03-10T13:13:42.831 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:13:42 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[66905]: ts=2026-03-10T13:13:42.803Z caller=manager.go:945 level=info component="rule manager" msg="Stopping rule manager..." 2026-03-10T13:13:42.831 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:13:42 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[66905]: ts=2026-03-10T13:13:42.803Z caller=manager.go:955 level=info component="rule manager" msg="Rule manager stopped" 2026-03-10T13:13:42.832 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:13:42 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[66905]: ts=2026-03-10T13:13:42.803Z caller=main.go:828 level=info msg="Scrape manager stopped" 2026-03-10T13:13:42.832 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:13:42 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[66905]: ts=2026-03-10T13:13:42.805Z caller=notifier.go:600 level=info component=notifier msg="Stopping notification manager..." 2026-03-10T13:13:42.832 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:13:42 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[66905]: ts=2026-03-10T13:13:42.805Z caller=main.go:1054 level=info msg="Notifier manager stopped" 2026-03-10T13:13:42.832 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:13:42 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[66905]: ts=2026-03-10T13:13:42.805Z caller=main.go:1066 level=info msg="See you next time!" 
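The graceful shutdown above is the first half of cephadm redeploying prometheus.a after "Reconfiguring prometheus.a (dependencies changed)". A minimal sketch of watching that redeploy from the bootstrap host, using only commands this run already exercises:
    # sketch: watch the monitoring daemons while cephadm redeploys them
    ceph orch ps --refresh | grep -E 'prometheus|grafana'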
2026-03-10T13:13:42.832 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:13:42 vm08 podman[71267]: 2026-03-10 13:13:42.814429887 +0000 UTC m=+0.028157394 container died 0a921c94fbaee48eb66569df197ffd4f8f996767222a7d3a6c95dda415ba2c8f (image=quay.io/prometheus/prometheus:v2.33.4, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-10T13:13:43.163 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:13:42 vm08 podman[71267]: 2026-03-10 13:13:42.829753304 +0000 UTC m=+0.043480811 container remove 0a921c94fbaee48eb66569df197ffd4f8f996767222a7d3a6c95dda415ba2c8f (image=quay.io/prometheus/prometheus:v2.33.4, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-10T13:13:43.163 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:13:42 vm08 bash[71267]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a 2026-03-10T13:13:43.163 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:13:42 vm08 systemd[1]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@prometheus.a.service: Deactivated successfully. 2026-03-10T13:13:43.163 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:13:42 vm08 systemd[1]: Stopped Ceph prometheus.a for 98a3dada-1c81-11f1-89c9-d57c120f78d5. 2026-03-10T13:13:43.163 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:13:42 vm08 systemd[1]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@prometheus.a.service: Consumed 1.021s CPU time. 2026-03-10T13:13:43.163 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:13:43 vm08 systemd[1]: Starting Ceph prometheus.a for 98a3dada-1c81-11f1-89c9-d57c120f78d5... 2026-03-10T13:13:43.427 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:13:43 vm08 podman[71376]: 2026-03-10 13:13:43.161475132 +0000 UTC m=+0.017541411 container create 5ef54cde2aad32806eb7fe252926d9ce30a197177bd920c75e48f208ffb042f2 (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-10T13:13:43.427 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:13:43 vm08 podman[71376]: 2026-03-10 13:13:43.185011723 +0000 UTC m=+0.041078003 container init 5ef54cde2aad32806eb7fe252926d9ce30a197177bd920c75e48f208ffb042f2 (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-10T13:13:43.427 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:13:43 vm08 podman[71376]: 2026-03-10 13:13:43.187298335 +0000 UTC m=+0.043364614 container start 5ef54cde2aad32806eb7fe252926d9ce30a197177bd920c75e48f208ffb042f2 (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-10T13:13:43.427 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:13:43 vm08 bash[71376]: 5ef54cde2aad32806eb7fe252926d9ce30a197177bd920c75e48f208ffb042f2 2026-03-10T13:13:43.427 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:13:43 vm08 podman[71376]: 2026-03-10 13:13:43.154293026 +0000 UTC m=+0.010359315 image pull 1d3b7f56885b6dd623f1785be963aa9c195f86bc256ea454e8d02a7980b79c53 quay.io/prometheus/prometheus:v2.51.0 2026-03-10T13:13:43.427 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:13:43 vm08 systemd[1]: Started Ceph prometheus.a for 98a3dada-1c81-11f1-89c9-d57c120f78d5. 
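The container recreate above swaps prometheus.a from quay.io/prometheus/prometheus:v2.33.4 to v2.51.0. A minimal sketch of confirming the image the redeployed container is running on vm08 (container name copied from the podman records above; the ImageName format key is assumed to be available in this podman version):
    # sketch: confirm which image the recreated prometheus.a container runs
    podman inspect --format '{{.ImageName}}' \
      ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a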
2026-03-10T13:13:43.427 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:13:43 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:13:43.226Z caller=main.go:617 level=info msg="Starting Prometheus Server" mode=server version="(version=2.51.0, branch=HEAD, revision=c05c15512acb675e3f6cd662a6727854e93fc024)" 2026-03-10T13:13:43.427 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:13:43 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:13:43.226Z caller=main.go:622 level=info build_context="(go=go1.22.1, platform=linux/amd64, user=root@b5723e458358, date=20240319-10:54:45, tags=netgo,builtinassets,stringlabels)" 2026-03-10T13:13:43.427 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:13:43 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:13:43.226Z caller=main.go:623 level=info host_details="(Linux 5.14.0-686.el9.x86_64 #1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026 x86_64 vm08 (none))" 2026-03-10T13:13:43.427 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:13:43 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:13:43.226Z caller=main.go:624 level=info fd_limits="(soft=1048576, hard=1048576)" 2026-03-10T13:13:43.427 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:13:43 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:13:43.226Z caller=main.go:625 level=info vm_limits="(soft=unlimited, hard=unlimited)" 2026-03-10T13:13:43.427 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:13:43 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:13:43.230Z caller=web.go:568 level=info component=web msg="Start listening for connections" address=:9095 2026-03-10T13:13:43.427 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:13:43 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:13:43.230Z caller=main.go:1129 level=info msg="Starting TSDB ..." 2026-03-10T13:13:43.427 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:13:43 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:13:43.232Z caller=head.go:616 level=info component=tsdb msg="Replaying on-disk memory mappable chunks if any" 2026-03-10T13:13:43.427 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:13:43 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:13:43.232Z caller=head.go:698 level=info component=tsdb msg="On-disk memory mappable chunks replay completed" duration=971ns 2026-03-10T13:13:43.427 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:13:43 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:13:43.232Z caller=head.go:706 level=info component=tsdb msg="Replaying WAL, this may take a while" 2026-03-10T13:13:43.427 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:13:43 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:13:43.232Z caller=tls_config.go:313 level=info component=web msg="Listening on" address=[::]:9095 2026-03-10T13:13:43.427 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:13:43 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:13:43.232Z caller=tls_config.go:316 level=info component=web msg="TLS is disabled." 
http2=false address=[::]:9095 2026-03-10T13:13:43.427 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:13:43 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:13:43.240Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=0 maxSegment=2 2026-03-10T13:13:43.427 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:13:43 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:13:43.254Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=1 maxSegment=2 2026-03-10T13:13:43.427 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:13:43 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:13:43.254Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=2 maxSegment=2 2026-03-10T13:13:43.427 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:13:43 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:13:43.254Z caller=head.go:815 level=info component=tsdb msg="WAL replay completed" checkpoint_replay_duration=77.004µs wal_replay_duration=21.965311ms wbl_replay_duration=400ns total_replay_duration=22.08815ms 2026-03-10T13:13:43.427 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:13:43 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:13:43.257Z caller=main.go:1150 level=info fs_type=XFS_SUPER_MAGIC 2026-03-10T13:13:43.427 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:13:43 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:13:43.257Z caller=main.go:1153 level=info msg="TSDB started" 2026-03-10T13:13:43.427 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:13:43 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:13:43.257Z caller=main.go:1335 level=info msg="Loading configuration file" filename=/etc/prometheus/prometheus.yml 2026-03-10T13:13:43.427 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:13:43 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:13:43.269Z caller=main.go:1372 level=info msg="Completed loading of configuration file" filename=/etc/prometheus/prometheus.yml totalDuration=12.259229ms db_storage=652ns remote_storage=1.253µs web_handler=311ns query_engine=612ns scrape=461.455µs scrape_sd=118.201µs notify=10.941µs notify_sd=7.253µs rules=11.322636ms tracing=4.108µs 2026-03-10T13:13:43.427 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:13:43 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:13:43.269Z caller=main.go:1114 level=info msg="Server is ready to receive web requests." 2026-03-10T13:13:43.427 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:13:43 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:13:43.269Z caller=manager.go:163 level=info component="rule manager" msg="Starting rule manager..." 2026-03-10T13:13:43.696 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:43 vm08 ceph-mon[49535]: pgmap v18: 161 pgs: 161 active+clean; 457 KiB data, 72 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:13:43.696 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:43 vm08 ceph-mon[49535]: from='client.? 
' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1924038199"}]': finished 2026-03-10T13:13:43.696 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:43 vm08 ceph-mon[49535]: osdmap e85: 8 total, 8 up, 8 in 2026-03-10T13:13:43.696 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:43 vm08 ceph-mon[49535]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:43.696 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:43 vm08 ceph-mon[49535]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:43.696 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:43 vm08 ceph-mon[49535]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:43.696 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:43 vm08 ceph-mon[49535]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:43.696 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:43 vm08 ceph-mon[49535]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch 2026-03-10T13:13:43.696 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:13:43 vm08 systemd[1]: Stopping Ceph grafana.a for 98a3dada-1c81-11f1-89c9-d57c120f78d5... 2026-03-10T13:13:43.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:43 vm00 ceph-mon[47364]: pgmap v18: 161 pgs: 161 active+clean; 457 KiB data, 72 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:13:43.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:43 vm00 ceph-mon[47364]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1924038199"}]': finished 2026-03-10T13:13:43.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:43 vm00 ceph-mon[47364]: osdmap e85: 8 total, 8 up, 8 in 2026-03-10T13:13:43.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:43 vm00 ceph-mon[47364]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:43.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:43 vm00 ceph-mon[47364]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:43.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:43 vm00 ceph-mon[47364]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:43.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:43 vm00 ceph-mon[47364]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:43.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:43 vm00 ceph-mon[47364]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch 2026-03-10T13:13:43.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:43 vm00 ceph-mon[51670]: pgmap v18: 161 pgs: 161 active+clean; 457 KiB data, 72 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:13:43.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:43 vm00 ceph-mon[51670]: from='client.? 
' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1924038199"}]': finished 2026-03-10T13:13:43.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:43 vm00 ceph-mon[51670]: osdmap e85: 8 total, 8 up, 8 in 2026-03-10T13:13:43.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:43 vm00 ceph-mon[51670]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:43.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:43 vm00 ceph-mon[51670]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:43.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:43 vm00 ceph-mon[51670]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:43.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:43 vm00 ceph-mon[51670]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:43.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:43 vm00 ceph-mon[51670]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch 2026-03-10T13:13:43.954 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:13:43 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[64826]: t=2026-03-10T13:13:43+0000 lvl=info msg="Shutdown started" logger=server reason="System signal: terminated" 2026-03-10T13:13:43.954 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:13:43 vm08 podman[71514]: 2026-03-10 13:13:43.729618839 +0000 UTC m=+0.026883449 container died cc6207fccfd03ccb7b907a7a7cd76e80298b7fa3edb400fca9f81ca579215a3c (image=quay.io/ceph/ceph-grafana:8.3.5, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a, vcs-type=git, vendor=Red Hat, Inc., distribution-scope=public, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.expose-services=, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.5-236.1648460182, version=8.5, io.buildah.version=1.24.2, release=236.1648460182, io.k8s.display-name=Red Hat Universal Base Image 8, vcs-ref=3aadd00326f3dd6cfe65ee31017ab98915fddb56, io.openshift.tags=base rhel8, summary=Grafana Container configured for Ceph mgr/dashboard integration, architecture=x86_64, maintainer=Paul Cuzner , name=ubi8, description=Ceph Grafana Container, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, build-date=2022-03-28T10:36:18.413762, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, com.redhat.component=ubi8-container) 2026-03-10T13:13:43.954 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:13:43 vm08 podman[71514]: 2026-03-10 13:13:43.754936885 +0000 UTC m=+0.052201495 container remove cc6207fccfd03ccb7b907a7a7cd76e80298b7fa3edb400fca9f81ca579215a3c (image=quay.io/ceph/ceph-grafana:8.3.5, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a, io.buildah.version=1.24.2, io.k8s.display-name=Red Hat Universal Base Image 8, build-date=2022-03-28T10:36:18.413762, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. 
This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.expose-services=, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.5-236.1648460182, description=Ceph Grafana Container, io.openshift.tags=base rhel8, vcs-ref=3aadd00326f3dd6cfe65ee31017ab98915fddb56, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, distribution-scope=public, vendor=Red Hat, Inc., version=8.5, architecture=x86_64, com.redhat.component=ubi8-container, maintainer=Paul Cuzner , release=236.1648460182, summary=Grafana Container configured for Ceph mgr/dashboard integration, vcs-type=git, name=ubi8, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com) 2026-03-10T13:13:43.954 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:13:43 vm08 bash[71514]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a 2026-03-10T13:13:43.954 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:13:43 vm08 bash[71532]: Error: no container with name or ID "ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana.a" found: no such container 2026-03-10T13:13:43.954 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:13:43 vm08 systemd[1]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@grafana.a.service: Deactivated successfully. 2026-03-10T13:13:43.954 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:13:43 vm08 systemd[1]: Stopped Ceph grafana.a for 98a3dada-1c81-11f1-89c9-d57c120f78d5. 2026-03-10T13:13:43.954 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:13:43 vm08 systemd[1]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@grafana.a.service: Consumed 1.571s CPU time. 2026-03-10T13:13:43.954 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:13:43 vm08 systemd[1]: Starting Ceph grafana.a for 98a3dada-1c81-11f1-89c9-d57c120f78d5... 2026-03-10T13:13:43.955 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:13:43 vm08 podman[71573]: 2026-03-10 13:13:43.879503984 +0000 UTC m=+0.016768001 container create 263cac442a993903ce76fa4340f496a0ef92b93fbe9ee409eb8e6ba0ca84905c (image=quay.io/ceph/ceph-grafana:8.3.5, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a, summary=Grafana Container configured for Ceph mgr/dashboard integration, io.openshift.expose-services=, build-date=2022-03-28T10:36:18.413762, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, release=236.1648460182, maintainer=Paul Cuzner , version=8.5, vcs-type=git, distribution-scope=public, name=ubi8, architecture=x86_64, com.redhat.component=ubi8-container, vcs-ref=3aadd00326f3dd6cfe65ee31017ab98915fddb56, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., io.buildah.version=1.24.2, io.openshift.tags=base rhel8, io.k8s.display-name=Red Hat Universal Base Image 8, vendor=Red Hat, Inc., url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.5-236.1648460182, description=Ceph Grafana Container, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com) 2026-03-10T13:13:43.955 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:13:43 vm08 podman[71573]: 2026-03-10 13:13:43.907791851 +0000 UTC m=+0.045055868 container init 263cac442a993903ce76fa4340f496a0ef92b93fbe9ee409eb8e6ba0ca84905c (image=quay.io/ceph/ceph-grafana:8.3.5, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.5-236.1648460182, description=Ceph Grafana Container, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, release=236.1648460182, maintainer=Paul Cuzner , vcs-ref=3aadd00326f3dd6cfe65ee31017ab98915fddb56, vcs-type=git, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, name=ubi8, version=8.5, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., distribution-scope=public, io.buildah.version=1.24.2, architecture=x86_64, vendor=Red Hat, Inc., build-date=2022-03-28T10:36:18.413762, io.openshift.tags=base rhel8, io.k8s.display-name=Red Hat Universal Base Image 8, summary=Grafana Container configured for Ceph mgr/dashboard integration, com.redhat.component=ubi8-container, io.openshift.expose-services=) 2026-03-10T13:13:43.955 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:13:43 vm08 podman[71573]: 2026-03-10 13:13:43.91045646 +0000 UTC m=+0.047720477 container start 263cac442a993903ce76fa4340f496a0ef92b93fbe9ee409eb8e6ba0ca84905c (image=quay.io/ceph/ceph-grafana:8.3.5, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a, io.openshift.tags=base rhel8, name=ubi8, vendor=Red Hat, Inc., description=Ceph Grafana Container, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., vcs-type=git, distribution-scope=public, release=236.1648460182, io.openshift.expose-services=, maintainer=Paul Cuzner , url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.5-236.1648460182, build-date=2022-03-28T10:36:18.413762, com.redhat.component=ubi8-container, vcs-ref=3aadd00326f3dd6cfe65ee31017ab98915fddb56, version=8.5, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.k8s.display-name=Red Hat Universal Base Image 8, summary=Grafana Container configured for Ceph mgr/dashboard integration, architecture=x86_64, io.buildah.version=1.24.2) 2026-03-10T13:13:43.955 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:13:43 vm08 bash[71573]: 263cac442a993903ce76fa4340f496a0ef92b93fbe9ee409eb8e6ba0ca84905c 2026-03-10T13:13:43.955 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:13:43 vm08 podman[71573]: 2026-03-10 13:13:43.872802208 +0000 UTC m=+0.010066245 image pull dad864ee21e98e69f4029d1e417aa085001566be0d322fbc75bc6f29b0050c01 quay.io/ceph/ceph-grafana:8.3.5 2026-03-10T13:13:43.955 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:13:43 vm08 systemd[1]: Started Ceph grafana.a for 98a3dada-1c81-11f1-89c9-d57c120f78d5. 2026-03-10T13:13:43.955 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:13:43 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[71583]: t=2026-03-10T13:13:43+0000 lvl=info msg="The state of unified alerting is still not defined. The decision will be made during as we run the database migrations" logger=settings 2026-03-10T13:13:44.253 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:13:44 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:13:44.140Z caller=cluster.go:698 level=info component=cluster msg="gossip settled; proceeding" elapsed=10.002972648s 2026-03-10T13:13:44.271 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:13:43 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: [10/Mar/2026:13:13:43] ENGINE Bus STOPPING 2026-03-10T13:13:44.272 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:13:43 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[71583]: t=2026-03-10T13:13:43+0000 lvl=warn msg="falling back to legacy setting of 'min_interval_seconds'; please use the configuration option in the `unified_alerting` section if Grafana 8 alerts are enabled." 
logger=settings 2026-03-10T13:13:44.272 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:13:43 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[71583]: t=2026-03-10T13:13:43+0000 lvl=info msg="Config loaded from" logger=settings file=/usr/share/grafana/conf/defaults.ini 2026-03-10T13:13:44.272 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:13:43 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[71583]: t=2026-03-10T13:13:43+0000 lvl=info msg="Config loaded from" logger=settings file=/etc/grafana/grafana.ini 2026-03-10T13:13:44.272 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:13:43 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[71583]: t=2026-03-10T13:13:43+0000 lvl=info msg="Config overridden from Environment variable" logger=settings var="GF_PATHS_DATA=/var/lib/grafana" 2026-03-10T13:13:44.272 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:13:43 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[71583]: t=2026-03-10T13:13:43+0000 lvl=info msg="Config overridden from Environment variable" logger=settings var="GF_PATHS_LOGS=/var/log/grafana" 2026-03-10T13:13:44.272 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:13:43 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[71583]: t=2026-03-10T13:13:43+0000 lvl=info msg="Config overridden from Environment variable" logger=settings var="GF_PATHS_PLUGINS=/var/lib/grafana/plugins" 2026-03-10T13:13:44.272 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:13:43 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[71583]: t=2026-03-10T13:13:43+0000 lvl=info msg="Config overridden from Environment variable" logger=settings var="GF_PATHS_PROVISIONING=/etc/grafana/provisioning" 2026-03-10T13:13:44.272 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:13:43 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[71583]: t=2026-03-10T13:13:43+0000 lvl=info msg="Path Home" logger=settings path=/usr/share/grafana 2026-03-10T13:13:44.272 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:13:43 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[71583]: t=2026-03-10T13:13:43+0000 lvl=info msg="Path Data" logger=settings path=/var/lib/grafana 2026-03-10T13:13:44.272 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:13:43 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[71583]: t=2026-03-10T13:13:43+0000 lvl=info msg="Path Logs" logger=settings path=/var/log/grafana 2026-03-10T13:13:44.272 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:13:43 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[71583]: t=2026-03-10T13:13:43+0000 lvl=info msg="Path Plugins" logger=settings path=/var/lib/grafana/plugins 2026-03-10T13:13:44.272 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:13:43 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[71583]: t=2026-03-10T13:13:43+0000 lvl=info msg="Path Provisioning" logger=settings path=/etc/grafana/provisioning 2026-03-10T13:13:44.272 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:13:43 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[71583]: t=2026-03-10T13:13:43+0000 lvl=info msg="App mode production" logger=settings 2026-03-10T13:13:44.272 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:13:43 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[71583]: t=2026-03-10T13:13:43+0000 lvl=info msg="Connecting to DB" logger=sqlstore dbtype=sqlite3 2026-03-10T13:13:44.272 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:13:43 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[71583]: 
t=2026-03-10T13:13:43+0000 lvl=warn msg="SQLite database file has broader permissions than it should" logger=sqlstore path=/var/lib/grafana/grafana.db mode=-rw-r--r-- expected=-rw-r----- 2026-03-10T13:13:44.272 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:13:43 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[71583]: t=2026-03-10T13:13:43+0000 lvl=info msg="Starting DB migrations" logger=migrator 2026-03-10T13:13:44.272 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:13:43 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[71583]: t=2026-03-10T13:13:43+0000 lvl=info msg="migrations completed" logger=migrator performed=0 skipped=377 duration=352.178µs 2026-03-10T13:13:44.272 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:13:43 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[71583]: t=2026-03-10T13:13:43+0000 lvl=info msg="Created default organization" logger=sqlstore 2026-03-10T13:13:44.272 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:13:43 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[71583]: t=2026-03-10T13:13:43+0000 lvl=info msg="Initialising plugins" logger=plugin.manager 2026-03-10T13:13:44.272 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:13:43 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[71583]: t=2026-03-10T13:13:43+0000 lvl=info msg="Plugin registered" logger=plugin.manager pluginId=input 2026-03-10T13:13:44.272 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:13:44 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[71583]: t=2026-03-10T13:13:44+0000 lvl=info msg="Plugin registered" logger=plugin.manager pluginId=grafana-piechart-panel 2026-03-10T13:13:44.272 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:13:44 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[71583]: t=2026-03-10T13:13:44+0000 lvl=info msg="Plugin registered" logger=plugin.manager pluginId=vonage-status-panel 2026-03-10T13:13:44.272 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:13:44 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[71583]: t=2026-03-10T13:13:44+0000 lvl=info msg="Live Push Gateway initialization" logger=live.push_http 2026-03-10T13:13:44.272 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:13:44 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[71583]: t=2026-03-10T13:13:44+0000 lvl=info msg="deleted datasource based on configuration" logger=provisioning.datasources name=Dashboard1 2026-03-10T13:13:44.272 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:13:44 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[71583]: t=2026-03-10T13:13:44+0000 lvl=info msg="inserting datasource from configuration " logger=provisioning.datasources name=Dashboard1 uid=P43CA22E17D0F9596 2026-03-10T13:13:44.272 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:13:44 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[71583]: t=2026-03-10T13:13:44+0000 lvl=info msg="inserting datasource from configuration " logger=provisioning.datasources name=Loki uid=P8E80F9AEF21F6940 2026-03-10T13:13:44.272 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:13:44 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[71583]: t=2026-03-10T13:13:44+0000 lvl=info msg="HTTP Server Listen" logger=http.server address=[::]:3000 protocol=https subUrl= socket= 2026-03-10T13:13:44.272 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:13:44 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[71583]: t=2026-03-10T13:13:44+0000 lvl=info msg="warming cache for startup" logger=ngalert 
2026-03-10T13:13:44.272 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:13:44 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[71583]: t=2026-03-10T13:13:44+0000 lvl=info msg="starting MultiOrg Alertmanager" logger=ngalert.multiorg.alertmanager 2026-03-10T13:13:44.576 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:44 vm08 ceph-mon[49535]: Reconfiguring grafana.a (dependencies changed)... 2026-03-10T13:13:44.576 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:44 vm08 ceph-mon[49535]: Regenerating cephadm self-signed grafana TLS certificates 2026-03-10T13:13:44.576 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:44 vm08 ceph-mon[49535]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch 2026-03-10T13:13:44.576 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:44 vm08 ceph-mon[49535]: Reconfiguring daemon grafana.a on vm08 2026-03-10T13:13:44.576 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:44 vm08 ceph-mon[49535]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:44.577 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:44 vm08 ceph-mon[49535]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:44.577 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:44 vm08 ceph-mon[49535]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-03-10T13:13:44.577 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:44 vm08 ceph-mon[49535]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm00.local:9093"}]: dispatch 2026-03-10T13:13:44.577 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:44 vm08 ceph-mon[49535]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:44.577 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:44 vm08 ceph-mon[49535]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-10T13:13:44.577 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:44 vm08 ceph-mon[49535]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:44.577 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:44 vm08 ceph-mon[49535]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-10T13:13:44.577 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:44 vm08 ceph-mon[49535]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm00"}]: dispatch 2026-03-10T13:13:44.577 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:44 vm08 ceph-mon[49535]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:44.577 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:44 vm08 ceph-mon[49535]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-10T13:13:44.577 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:44 vm08 ceph-mon[49535]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://host.containers.internal:9095"}]: dispatch 2026-03-10T13:13:44.577 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:44 vm08 ceph-mon[49535]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:44.577 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:44 vm08 ceph-mon[49535]: from='mgr.24680 
192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-10T13:13:44.577 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:44 vm08 ceph-mon[49535]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://host.containers.internal:3000"}]: dispatch 2026-03-10T13:13:44.577 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:44 vm08 ceph-mon[49535]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:44.577 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:13:44 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: [10/Mar/2026:13:13:44] ENGINE HTTP Server cherrypy._cpwsgi_server.CPWSGIServer(('::', 9283)) shut down 2026-03-10T13:13:44.577 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:13:44 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: [10/Mar/2026:13:13:44] ENGINE Bus STOPPED 2026-03-10T13:13:44.577 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:13:44 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: [10/Mar/2026:13:13:44] ENGINE Bus STARTING 2026-03-10T13:13:44.577 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:13:44 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: [10/Mar/2026:13:13:44] ENGINE Serving on http://:::9283 2026-03-10T13:13:44.577 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:13:44 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: [10/Mar/2026:13:13:44] ENGINE Bus STARTED 2026-03-10T13:13:44.577 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:13:44 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: [10/Mar/2026:13:13:44] ENGINE Bus STOPPING 2026-03-10T13:13:44.723 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:44 vm00 ceph-mon[51670]: Reconfiguring grafana.a (dependencies changed)... 2026-03-10T13:13:44.723 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:44 vm00 ceph-mon[51670]: Regenerating cephadm self-signed grafana TLS certificates 2026-03-10T13:13:44.723 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:44 vm00 ceph-mon[51670]: from='mon.? -' entity='mon.' 
cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch 2026-03-10T13:13:44.723 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:44 vm00 ceph-mon[51670]: Reconfiguring daemon grafana.a on vm08 2026-03-10T13:13:44.723 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:44 vm00 ceph-mon[51670]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:44.723 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:44 vm00 ceph-mon[51670]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:44.723 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:44 vm00 ceph-mon[51670]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-03-10T13:13:44.723 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:44 vm00 ceph-mon[51670]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm00.local:9093"}]: dispatch 2026-03-10T13:13:44.723 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:44 vm00 ceph-mon[51670]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:44.723 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:44 vm00 ceph-mon[51670]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-10T13:13:44.723 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:44 vm00 ceph-mon[51670]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:44.723 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:44 vm00 ceph-mon[51670]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-10T13:13:44.723 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:44 vm00 ceph-mon[51670]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm00"}]: dispatch 2026-03-10T13:13:44.723 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:44 vm00 ceph-mon[51670]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:44.723 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:44 vm00 ceph-mon[51670]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-10T13:13:44.723 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:44 vm00 ceph-mon[51670]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://host.containers.internal:9095"}]: dispatch 2026-03-10T13:13:44.723 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:44 vm00 ceph-mon[51670]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:44.723 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:44 vm00 ceph-mon[51670]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-10T13:13:44.723 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:44 vm00 ceph-mon[51670]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://host.containers.internal:3000"}]: dispatch 2026-03-10T13:13:44.723 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:44 vm00 ceph-mon[51670]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:44.723 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:44 vm00 ceph-mon[47364]: Reconfiguring grafana.a (dependencies changed)... 
2026-03-10T13:13:44.723 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:44 vm00 ceph-mon[47364]: Regenerating cephadm self-signed grafana TLS certificates 2026-03-10T13:13:44.723 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:44 vm00 ceph-mon[47364]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch 2026-03-10T13:13:44.723 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:44 vm00 ceph-mon[47364]: Reconfiguring daemon grafana.a on vm08 2026-03-10T13:13:44.723 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:44 vm00 ceph-mon[47364]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:44.723 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:44 vm00 ceph-mon[47364]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:44.723 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:44 vm00 ceph-mon[47364]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-03-10T13:13:44.723 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:44 vm00 ceph-mon[47364]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm00.local:9093"}]: dispatch 2026-03-10T13:13:44.724 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:44 vm00 ceph-mon[47364]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:44.724 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:44 vm00 ceph-mon[47364]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-10T13:13:44.724 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:44 vm00 ceph-mon[47364]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:44.724 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:44 vm00 ceph-mon[47364]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-10T13:13:44.724 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:44 vm00 ceph-mon[47364]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm00"}]: dispatch 2026-03-10T13:13:44.724 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:44 vm00 ceph-mon[47364]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:44.724 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:44 vm00 ceph-mon[47364]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-10T13:13:44.724 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:44 vm00 ceph-mon[47364]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://host.containers.internal:9095"}]: dispatch 2026-03-10T13:13:44.724 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:44 vm00 ceph-mon[47364]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:44.724 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:44 vm00 ceph-mon[47364]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-10T13:13:44.724 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:44 vm00 ceph-mon[47364]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://host.containers.internal:3000"}]: dispatch 2026-03-10T13:13:44.724 
INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:44 vm00 ceph-mon[47364]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:45.273 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:13:44 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: [10/Mar/2026:13:13:44] ENGINE HTTP Server cherrypy._cpwsgi_server.CPWSGIServer(('::', 9283)) shut down 2026-03-10T13:13:45.273 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:13:44 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: [10/Mar/2026:13:13:44] ENGINE Bus STOPPED 2026-03-10T13:13:45.273 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:13:44 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: [10/Mar/2026:13:13:44] ENGINE Bus STARTING 2026-03-10T13:13:45.273 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:13:45 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: [10/Mar/2026:13:13:45] ENGINE Serving on http://:::9283 2026-03-10T13:13:45.273 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:13:45 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: [10/Mar/2026:13:13:45] ENGINE Bus STARTED 2026-03-10T13:13:45.273 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:13:45 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: [10/Mar/2026:13:13:45] ENGINE Bus STOPPING 2026-03-10T13:13:45.273 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:13:45 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: [10/Mar/2026:13:13:45] ENGINE HTTP Server cherrypy._cpwsgi_server.CPWSGIServer(('::', 9283)) shut down 2026-03-10T13:13:45.273 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:13:45 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: [10/Mar/2026:13:13:45] ENGINE Bus STOPPED 2026-03-10T13:13:45.273 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:13:45 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: [10/Mar/2026:13:13:45] ENGINE Bus STARTING 2026-03-10T13:13:45.273 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:13:45 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: [10/Mar/2026:13:13:45] ENGINE Serving on http://:::9283 2026-03-10T13:13:45.273 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:13:45 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: [10/Mar/2026:13:13:45] ENGINE Bus STARTED 2026-03-10T13:13:45.509 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:45 vm00 ceph-mon[47364]: pgmap v20: 161 pgs: 161 active+clean; 457 KiB data, 72 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-10T13:13:45.509 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:45 vm00 ceph-mon[47364]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-03-10T13:13:45.509 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:45 vm00 ceph-mon[47364]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm00.local:9093"}]: dispatch 2026-03-10T13:13:45.509 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:45 vm00 ceph-mon[47364]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-10T13:13:45.509 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:45 vm00 ceph-mon[47364]: Adding iSCSI gateway http://:@192.168.123.100:5000 to Dashboard 2026-03-10T13:13:45.509 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:45 vm00 ceph-mon[47364]: from='mon.? -' entity='mon.' 
cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-10T13:13:45.509 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:45 vm00 ceph-mon[47364]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm00"}]: dispatch 2026-03-10T13:13:45.509 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:45 vm00 ceph-mon[47364]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-10T13:13:45.509 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:45 vm00 ceph-mon[47364]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://host.containers.internal:9095"}]: dispatch 2026-03-10T13:13:45.509 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:45 vm00 ceph-mon[47364]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-10T13:13:45.509 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:45 vm00 ceph-mon[47364]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://host.containers.internal:3000"}]: dispatch 2026-03-10T13:13:45.509 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:45 vm00 ceph-mon[47364]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:45.509 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:45 vm00 ceph-mon[47364]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:45.509 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:45 vm00 ceph-mon[47364]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:45.509 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:45 vm00 ceph-mon[47364]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:45.509 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:45 vm00 ceph-mon[47364]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:13:45.509 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:45 vm00 ceph-mon[47364]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:13:45.509 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:45 vm00 ceph-mon[47364]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:45.509 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:45 vm00 ceph-mon[51670]: pgmap v20: 161 pgs: 161 active+clean; 457 KiB data, 72 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-10T13:13:45.509 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:45 vm00 ceph-mon[51670]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-03-10T13:13:45.509 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:45 vm00 ceph-mon[51670]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm00.local:9093"}]: dispatch 2026-03-10T13:13:45.509 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:45 vm00 ceph-mon[51670]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-10T13:13:45.509 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:45 vm00 ceph-mon[51670]: Adding iSCSI gateway http://:@192.168.123.100:5000 to Dashboard 2026-03-10T13:13:45.509 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:45 vm00 ceph-mon[51670]: from='mon.? -' entity='mon.' 
cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-10T13:13:45.509 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:45 vm00 ceph-mon[51670]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm00"}]: dispatch 2026-03-10T13:13:45.509 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:45 vm00 ceph-mon[51670]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-10T13:13:45.509 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:45 vm00 ceph-mon[51670]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://host.containers.internal:9095"}]: dispatch 2026-03-10T13:13:45.510 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:45 vm00 ceph-mon[51670]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-10T13:13:45.510 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:45 vm00 ceph-mon[51670]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://host.containers.internal:3000"}]: dispatch 2026-03-10T13:13:45.510 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:45 vm00 ceph-mon[51670]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:45.510 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:45 vm00 ceph-mon[51670]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:45.510 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:45 vm00 ceph-mon[51670]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:45.510 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:45 vm00 ceph-mon[51670]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:45.510 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:45 vm00 ceph-mon[51670]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:13:45.510 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:45 vm00 ceph-mon[51670]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:13:45.510 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:45 vm00 ceph-mon[51670]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:45.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:45 vm08 ceph-mon[49535]: pgmap v20: 161 pgs: 161 active+clean; 457 KiB data, 72 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-10T13:13:45.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:45 vm08 ceph-mon[49535]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-03-10T13:13:45.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:45 vm08 ceph-mon[49535]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm00.local:9093"}]: dispatch 2026-03-10T13:13:45.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:45 vm08 ceph-mon[49535]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-10T13:13:45.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:45 vm08 ceph-mon[49535]: Adding iSCSI gateway http://:@192.168.123.100:5000 to Dashboard 2026-03-10T13:13:45.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:45 vm08 ceph-mon[49535]: from='mon.? -' entity='mon.' 
cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-10T13:13:45.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:45 vm08 ceph-mon[49535]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm00"}]: dispatch 2026-03-10T13:13:45.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:45 vm08 ceph-mon[49535]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-10T13:13:45.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:45 vm08 ceph-mon[49535]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://host.containers.internal:9095"}]: dispatch 2026-03-10T13:13:45.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:45 vm08 ceph-mon[49535]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-10T13:13:45.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:45 vm08 ceph-mon[49535]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://host.containers.internal:3000"}]: dispatch 2026-03-10T13:13:45.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:45 vm08 ceph-mon[49535]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:45.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:45 vm08 ceph-mon[49535]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:45.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:45 vm08 ceph-mon[49535]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:45.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:45 vm08 ceph-mon[49535]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:45.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:45 vm08 ceph-mon[49535]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:13:45.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:45 vm08 ceph-mon[49535]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:13:45.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:45 vm08 ceph-mon[49535]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:13:46.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:46 vm00 ceph-mon[47364]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:13:46.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:46 vm00 ceph-mon[51670]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:13:46.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:46 vm08 ceph-mon[49535]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:13:47.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:47 vm00 ceph-mon[47364]: pgmap v21: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.7 KiB/s rd, 1 op/s 2026-03-10T13:13:47.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:47 vm00 ceph-mon[51670]: pgmap v21: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.7 KiB/s rd, 1 op/s 2026-03-10T13:13:47.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:47 vm08 ceph-mon[49535]: pgmap v21: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB 
avail; 1.7 KiB/s rd, 1 op/s 2026-03-10T13:13:49.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:48 vm00 ceph-mon[47364]: pgmap v22: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 676 B/s rd, 0 op/s 2026-03-10T13:13:49.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:48 vm00 ceph-mon[51670]: pgmap v22: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 676 B/s rd, 0 op/s 2026-03-10T13:13:49.270 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:13:49 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:13:49] "GET /metrics HTTP/1.1" 200 37523 "" "Prometheus/2.51.0" 2026-03-10T13:13:49.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:48 vm08 ceph-mon[49535]: pgmap v22: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 676 B/s rd, 0 op/s 2026-03-10T13:13:51.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:51 vm00 ceph-mon[47364]: pgmap v23: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 600 B/s rd, 0 op/s 2026-03-10T13:13:51.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:51 vm00 ceph-mon[51670]: pgmap v23: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 600 B/s rd, 0 op/s 2026-03-10T13:13:51.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:51 vm08 ceph-mon[49535]: pgmap v23: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 600 B/s rd, 0 op/s 2026-03-10T13:13:53.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:53 vm00 ceph-mon[47364]: pgmap v24: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-10T13:13:53.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:53 vm00 ceph-mon[51670]: pgmap v24: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-10T13:13:53.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:53 vm08 ceph-mon[49535]: pgmap v24: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-10T13:13:54.520 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:13:54 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:13:54.146Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. 
Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"98a3dada-1c81-11f1-89c9-d57c120f78d5\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"192.168.123.108:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T13:13:54.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:54 vm08 ceph-mon[49535]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:13:54.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:54 vm00 ceph-mon[47364]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:13:54.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:54 vm00 ceph-mon[51670]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:13:55.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:55 vm08 ceph-mon[49535]: pgmap v25: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 889 B/s rd, 0 op/s 2026-03-10T13:13:55.653 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:55 vm00 ceph-mon[47364]: pgmap v25: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 889 B/s rd, 0 op/s 2026-03-10T13:13:55.653 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:55 vm00 ceph-mon[51670]: pgmap v25: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 889 B/s rd, 0 op/s 2026-03-10T13:13:56.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:56 vm00 ceph-mon[47364]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:13:56.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:56 vm00 ceph-mon[51670]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:13:56.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:56 vm08 ceph-mon[49535]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:13:57.270 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:13:56 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:13:56.947Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: 
predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm00\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"98a3dada-1c81-11f1-89c9-d57c120f78d5\", domainname=\"(none)\", instance=\"vm00\", job=\"node\", machine=\"x86_64\", nodename=\"vm00\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm00\", job=\"node\", machine=\"x86_64\", nodename=\"vm00\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T13:13:57.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:57 vm00 ceph-mon[47364]: pgmap v26: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:13:57.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:57 vm00 ceph-mon[51670]: pgmap v26: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:13:57.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:57 vm08 ceph-mon[49535]: pgmap v26: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:13:59.520 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:13:59 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:13:59] "GET /metrics HTTP/1.1" 200 37528 "" "Prometheus/2.51.0" 2026-03-10T13:13:59.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:13:59 vm08 ceph-mon[49535]: pgmap v27: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:13:59.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:13:59 vm00 ceph-mon[47364]: pgmap v27: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:13:59.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:13:59 vm00 ceph-mon[51670]: pgmap v27: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:14:01.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:14:01 vm00 ceph-mon[47364]: pgmap v28: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:14:01.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:14:01 vm00 ceph-mon[51670]: pgmap v28: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:14:01.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:14:01 vm08 ceph-mon[49535]: pgmap v28: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:14:03.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:14:03 vm00 ceph-mon[47364]: pgmap v29: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 
2026-03-10T13:14:03.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:14:03 vm00 ceph-mon[51670]: pgmap v29: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:14:03.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:14:03 vm08 ceph-mon[49535]: pgmap v29: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:14:04.520 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:14:04 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:14:04.146Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"98a3dada-1c81-11f1-89c9-d57c120f78d5\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"192.168.123.108:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T13:14:05.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:14:05 vm00 ceph-mon[47364]: pgmap v30: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:14:05.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:14:05 vm00 ceph-mon[51670]: pgmap v30: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:14:05.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:14:05 vm08 ceph-mon[49535]: pgmap v30: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:14:06.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:14:06 vm00 ceph-mon[47364]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:14:06.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:14:06 vm00 ceph-mon[51670]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 
2026-03-10T13:14:06.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:14:06 vm08 ceph-mon[49535]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:14:07.270 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:14:06 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:14:06.947Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm00\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"98a3dada-1c81-11f1-89c9-d57c120f78d5\", domainname=\"(none)\", instance=\"vm00\", job=\"node\", machine=\"x86_64\", nodename=\"vm00\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm00\", job=\"node\", machine=\"x86_64\", nodename=\"vm00\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T13:14:07.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:14:07 vm00 ceph-mon[47364]: pgmap v31: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:14:07.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:14:07 vm00 ceph-mon[51670]: pgmap v31: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:14:07.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:14:07 vm08 ceph-mon[49535]: pgmap v31: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:14:09.520 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:14:09 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:14:09] "GET /metrics HTTP/1.1" 200 37528 "" "Prometheus/2.51.0" 2026-03-10T13:14:09.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:14:09 vm08 ceph-mon[49535]: pgmap v32: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:14:09.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:14:09 vm08 ceph-mon[49535]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:14:09.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:14:09 vm00 ceph-mon[47364]: pgmap v32: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:14:09.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:14:09 vm00 ceph-mon[47364]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": 
"json"}]: dispatch 2026-03-10T13:14:09.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:14:09 vm00 ceph-mon[51670]: pgmap v32: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:14:09.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:14:09 vm00 ceph-mon[51670]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:14:11.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:14:11 vm00 ceph-mon[47364]: pgmap v33: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:14:11.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:14:11 vm00 ceph-mon[51670]: pgmap v33: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:14:11.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:14:11 vm08 ceph-mon[49535]: pgmap v33: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:14:13.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:14:13 vm00 ceph-mon[47364]: pgmap v34: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:14:13.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:14:13 vm00 ceph-mon[51670]: pgmap v34: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:14:13.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:14:13 vm08 ceph-mon[49535]: pgmap v34: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:14:14.520 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:14:14 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:14:14.146Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. 
Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"98a3dada-1c81-11f1-89c9-d57c120f78d5\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"192.168.123.108:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T13:14:15.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:14:15 vm00 ceph-mon[47364]: pgmap v35: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:14:15.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:14:15 vm00 ceph-mon[51670]: pgmap v35: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:14:15.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:14:15 vm08 ceph-mon[49535]: pgmap v35: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:14:16.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:14:16 vm00 ceph-mon[47364]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:14:16.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:14:16 vm00 ceph-mon[51670]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:14:16.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:14:16 vm08 ceph-mon[49535]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:14:17.270 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:14:16 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:14:16.947Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm00\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"98a3dada-1c81-11f1-89c9-d57c120f78d5\", domainname=\"(none)\", instance=\"vm00\", job=\"node\", 
machine=\"x86_64\", nodename=\"vm00\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm00\", job=\"node\", machine=\"x86_64\", nodename=\"vm00\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T13:14:17.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:14:17 vm00 ceph-mon[47364]: pgmap v36: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:14:17.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:14:17 vm00 ceph-mon[51670]: pgmap v36: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:14:17.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:14:17 vm08 ceph-mon[49535]: pgmap v36: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:14:19.368 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:14:19 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:14:19] "GET /metrics HTTP/1.1" 200 37525 "" "Prometheus/2.51.0" 2026-03-10T13:14:19.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:14:19 vm00 ceph-mon[47364]: pgmap v37: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:14:19.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:14:19 vm00 ceph-mon[51670]: pgmap v37: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:14:19.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:14:19 vm08 ceph-mon[49535]: pgmap v37: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:14:21.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:14:21 vm00 ceph-mon[47364]: pgmap v38: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:14:21.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:14:21 vm00 ceph-mon[51670]: pgmap v38: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:14:21.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:14:21 vm08 ceph-mon[49535]: pgmap v38: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:14:23.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:14:23 vm00 ceph-mon[47364]: pgmap v39: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:14:23.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:14:23 vm00 ceph-mon[51670]: pgmap v39: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:14:23.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:14:23 vm08 ceph-mon[49535]: pgmap v39: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:14:24.520 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:14:24 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:14:24.146Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" 
file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"98a3dada-1c81-11f1-89c9-d57c120f78d5\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"192.168.123.108:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T13:14:24.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:14:24 vm08 ceph-mon[49535]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:14:24.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:14:24 vm00 ceph-mon[47364]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:14:24.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:14:24 vm00 ceph-mon[51670]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:14:25.727 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:14:25 vm00 ceph-mon[47364]: pgmap v40: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:14:25.727 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:14:25 vm00 ceph-mon[51670]: pgmap v40: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:14:25.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:14:25 vm08 ceph-mon[49535]: pgmap v40: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:14:26.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:14:26 vm00 ceph-mon[47364]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:14:26.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:14:26 vm00 ceph-mon[51670]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:14:26.770 
INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:14:26 vm08 ceph-mon[49535]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:14:27.270 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:14:26 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:14:26.947Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm00\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"98a3dada-1c81-11f1-89c9-d57c120f78d5\", domainname=\"(none)\", instance=\"vm00\", job=\"node\", machine=\"x86_64\", nodename=\"vm00\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm00\", job=\"node\", machine=\"x86_64\", nodename=\"vm00\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T13:14:27.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:14:27 vm00 ceph-mon[47364]: pgmap v41: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:14:27.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:14:27 vm00 ceph-mon[51670]: pgmap v41: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:14:27.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:14:27 vm08 ceph-mon[49535]: pgmap v41: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:14:29.479 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:14:29 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:14:29] "GET /metrics HTTP/1.1" 200 37525 "" "Prometheus/2.51.0" 2026-03-10T13:14:29.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:14:29 vm00 ceph-mon[47364]: pgmap v42: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:14:29.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:14:29 vm00 ceph-mon[51670]: pgmap v42: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:14:29.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:14:29 vm08 ceph-mon[49535]: pgmap v42: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:14:31.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:14:31 vm00 ceph-mon[47364]: pgmap v43: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:14:31.753 
INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:14:31 vm00 ceph-mon[51670]: pgmap v43: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:14:31.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:14:31 vm08 ceph-mon[49535]: pgmap v43: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:14:33.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:14:33 vm00 ceph-mon[47364]: pgmap v44: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:14:33.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:14:33 vm00 ceph-mon[51670]: pgmap v44: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:14:33.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:14:33 vm08 ceph-mon[49535]: pgmap v44: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:14:34.520 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:14:34 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:14:34.146Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. 
Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"98a3dada-1c81-11f1-89c9-d57c120f78d5\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"192.168.123.108:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T13:14:35.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:14:35 vm00 ceph-mon[47364]: pgmap v45: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:14:35.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:14:35 vm00 ceph-mon[51670]: pgmap v45: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:14:35.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:14:35 vm08 ceph-mon[49535]: pgmap v45: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:14:36.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:14:36 vm08 ceph-mon[49535]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:14:37.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:14:36 vm00 ceph-mon[47364]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:14:37.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:14:36 vm00 ceph-mon[51670]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:14:37.270 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:14:36 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:14:36.947Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm00\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"98a3dada-1c81-11f1-89c9-d57c120f78d5\", domainname=\"(none)\", instance=\"vm00\", job=\"node\", 
machine=\"x86_64\", nodename=\"vm00\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm00\", job=\"node\", machine=\"x86_64\", nodename=\"vm00\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T13:14:37.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:14:37 vm08 ceph-mon[49535]: pgmap v46: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:14:38.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:14:37 vm00 ceph-mon[47364]: pgmap v46: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:14:38.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:14:37 vm00 ceph-mon[51670]: pgmap v46: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:14:39.512 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:14:39 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:14:39] "GET /metrics HTTP/1.1" 200 37525 "" "Prometheus/2.51.0" 2026-03-10T13:14:39.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:14:39 vm08 ceph-mon[49535]: pgmap v47: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:14:39.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:14:39 vm08 ceph-mon[49535]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:14:40.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:14:39 vm00 ceph-mon[47364]: pgmap v47: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:14:40.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:14:39 vm00 ceph-mon[47364]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:14:40.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:14:39 vm00 ceph-mon[51670]: pgmap v47: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:14:40.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:14:39 vm00 ceph-mon[51670]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:14:41.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:14:41 vm08 ceph-mon[49535]: pgmap v48: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:14:42.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:14:41 vm00 ceph-mon[47364]: pgmap v48: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:14:42.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:14:41 vm00 ceph-mon[51670]: pgmap v48: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:14:44.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:14:43 vm00 ceph-mon[47364]: pgmap v49: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:14:44.003 
INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:14:43 vm00 ceph-mon[51670]: pgmap v49: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:14:44.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:14:43 vm08 ceph-mon[49535]: pgmap v49: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:14:44.520 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:14:44 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:14:44.146Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"98a3dada-1c81-11f1-89c9-d57c120f78d5\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"192.168.123.108:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T13:14:45.801 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:14:45 vm08 ceph-mon[49535]: pgmap v50: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:14:45.947 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:14:45 vm00 ceph-mon[47364]: pgmap v50: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:14:45.947 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:14:45 vm00 ceph-mon[51670]: pgmap v50: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:14:46.948 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:14:46 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:14:46.947Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 
5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm00\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"98a3dada-1c81-11f1-89c9-d57c120f78d5\", domainname=\"(none)\", instance=\"vm00\", job=\"node\", machine=\"x86_64\", nodename=\"vm00\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm00\", job=\"node\", machine=\"x86_64\", nodename=\"vm00\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T13:14:46.948 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:14:46 vm08 ceph-mon[49535]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:14:46.948 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:14:46 vm08 ceph-mon[49535]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:14:46.948 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:14:46 vm08 ceph-mon[49535]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:14:46.948 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:14:46 vm08 ceph-mon[49535]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:14:46.948 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:14:46 vm08 ceph-mon[49535]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:14:47.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:14:46 vm00 ceph-mon[47364]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:14:47.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:14:46 vm00 ceph-mon[47364]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:14:47.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:14:46 vm00 ceph-mon[47364]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:14:47.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:14:46 vm00 ceph-mon[47364]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:14:47.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:14:46 vm00 ceph-mon[47364]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:14:47.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:14:46 vm00 ceph-mon[51670]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:14:47.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:14:46 vm00 ceph-mon[51670]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:14:47.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:14:46 vm00 ceph-mon[51670]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:14:47.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:14:46 vm00 ceph-mon[51670]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:14:47.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:14:46 vm00 ceph-mon[51670]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:14:48.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:14:47 vm00 ceph-mon[51670]: pgmap v51: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 
GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:14:48.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:14:47 vm00 ceph-mon[51670]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:14:48.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:14:47 vm00 ceph-mon[51670]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:14:48.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:14:47 vm00 ceph-mon[51670]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:14:48.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:14:47 vm00 ceph-mon[47364]: pgmap v51: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:14:48.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:14:47 vm00 ceph-mon[47364]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:14:48.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:14:47 vm00 ceph-mon[47364]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:14:48.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:14:47 vm00 ceph-mon[47364]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:14:48.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:14:47 vm08 ceph-mon[49535]: pgmap v51: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:14:48.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:14:47 vm08 ceph-mon[49535]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:14:48.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:14:47 vm08 ceph-mon[49535]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:14:48.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:14:47 vm08 ceph-mon[49535]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:14:49.520 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:14:49 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:14:49] "GET /metrics HTTP/1.1" 200 37526 "" "Prometheus/2.51.0" 2026-03-10T13:14:50.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:14:49 vm00 ceph-mon[51670]: pgmap v52: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:14:50.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:14:49 vm00 ceph-mon[47364]: pgmap v52: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:14:50.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:14:49 vm08 ceph-mon[49535]: pgmap v52: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:14:52.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:14:51 vm00 ceph-mon[47364]: pgmap v53: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:14:52.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:14:51 vm00 ceph-mon[51670]: pgmap v53: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:14:52.020 
INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:14:51 vm08 ceph-mon[49535]: pgmap v53: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:14:54.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:14:53 vm00 ceph-mon[47364]: pgmap v54: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:14:54.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:14:53 vm00 ceph-mon[51670]: pgmap v54: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:14:54.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:14:53 vm08 ceph-mon[49535]: pgmap v54: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:14:54.520 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:14:54 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:14:54.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"98a3dada-1c81-11f1-89c9-d57c120f78d5\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"192.168.123.108:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T13:14:55.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:14:54 vm00 ceph-mon[47364]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:14:55.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:14:54 vm00 ceph-mon[51670]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:14:55.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:14:54 vm08 ceph-mon[49535]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 
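The CephOSDFlapping and CephNodeDiskspaceWarning evaluation failures that repeat above (roughly every 10 s) are the same PromQL problem: the right-hand side of an "on (...) group_left (...)" join returns two series per match group, because the same metric is visible under two label sets — ceph_osd_metadata with instance="ceph_cluster" versus instance="192.168.123.108:9283", and node_uname_info with and without the cluster label — so Prometheus refuses the many-to-many match. The following is only a sketch of how such rules could be made tolerant of the duplicates by collapsing the right-hand side before the join; it is not the shipped ceph_alerts.yml content, and dropping the redundant scrape target would be the alternative fix.

    # Sketch only: deduplicate ceph_osd_metadata to one series per
    # (ceph_daemon, hostname) so the group_left join is one-to-one again.
    (
      rate(ceph_osd_up[5m])
        * on (ceph_daemon) group_left (hostname)
      max by (ceph_daemon, hostname) (ceph_osd_metadata)
    ) * 60 > 1

    # Same idea for the node_uname_info join used by CephNodeDiskspaceWarning.
    predict_linear(node_filesystem_free_bytes{device=~"/.*"}[2d], 3600 * 24 * 5)
      * on (instance) group_left (nodename)
      max by (instance, nodename) (node_uname_info)
    < 0

Nothing in this run changes the rule files, so the same two warnings keep reappearing at each evaluation interval for the rest of the excerpt.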
2026-03-10T13:14:56.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:14:55 vm00 ceph-mon[47364]: pgmap v55: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:14:56.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:14:55 vm00 ceph-mon[51670]: pgmap v55: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:14:56.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:14:55 vm08 ceph-mon[49535]: pgmap v55: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:14:56.948 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:14:56 vm08 ceph-mon[49535]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:14:57.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:14:56 vm00 ceph-mon[47364]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:14:57.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:14:56 vm00 ceph-mon[51670]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:14:57.270 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:14:56 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:14:56.947Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm00\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"98a3dada-1c81-11f1-89c9-d57c120f78d5\", domainname=\"(none)\", instance=\"vm00\", job=\"node\", machine=\"x86_64\", nodename=\"vm00\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm00\", job=\"node\", machine=\"x86_64\", nodename=\"vm00\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T13:14:58.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:14:57 vm00 ceph-mon[47364]: pgmap v56: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:14:58.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:14:57 vm00 ceph-mon[51670]: pgmap v56: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:14:58.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:14:57 vm08 ceph-mon[49535]: pgmap v56: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:14:59.520 
INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:14:59 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:14:59] "GET /metrics HTTP/1.1" 200 37521 "" "Prometheus/2.51.0" 2026-03-10T13:15:00.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:14:59 vm00 ceph-mon[47364]: pgmap v57: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:15:00.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:14:59 vm00 ceph-mon[51670]: pgmap v57: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:15:00.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:14:59 vm08 ceph-mon[49535]: pgmap v57: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:15:02.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:15:01 vm00 ceph-mon[47364]: pgmap v58: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:15:02.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:15:01 vm00 ceph-mon[51670]: pgmap v58: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:15:02.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:15:01 vm08 ceph-mon[49535]: pgmap v58: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:15:04.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:15:03 vm00 ceph-mon[47364]: pgmap v59: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:15:04.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:15:03 vm00 ceph-mon[51670]: pgmap v59: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:15:04.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:15:03 vm08 ceph-mon[49535]: pgmap v59: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:15:04.520 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:15:04 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:15:04.146Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. 
Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"98a3dada-1c81-11f1-89c9-d57c120f78d5\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"192.168.123.108:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T13:15:06.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:15:05 vm00 ceph-mon[47364]: pgmap v60: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:15:06.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:15:05 vm00 ceph-mon[51670]: pgmap v60: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:15:06.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:15:05 vm08 ceph-mon[49535]: pgmap v60: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:15:06.948 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:15:06 vm08 ceph-mon[49535]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:15:07.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:15:06 vm00 ceph-mon[47364]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:15:07.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:15:06 vm00 ceph-mon[51670]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:15:07.270 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:15:06 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:15:06.947Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm00\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"98a3dada-1c81-11f1-89c9-d57c120f78d5\", domainname=\"(none)\", instance=\"vm00\", job=\"node\", 
machine=\"x86_64\", nodename=\"vm00\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm00\", job=\"node\", machine=\"x86_64\", nodename=\"vm00\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T13:15:08.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:15:07 vm00 ceph-mon[47364]: pgmap v61: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:15:08.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:15:07 vm00 ceph-mon[51670]: pgmap v61: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:15:08.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:15:07 vm08 ceph-mon[49535]: pgmap v61: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:15:09.520 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:15:09 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:15:09] "GET /metrics HTTP/1.1" 200 37521 "" "Prometheus/2.51.0" 2026-03-10T13:15:10.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:15:09 vm00 ceph-mon[47364]: pgmap v62: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:15:10.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:15:09 vm00 ceph-mon[47364]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:15:10.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:15:09 vm00 ceph-mon[51670]: pgmap v62: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:15:10.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:15:09 vm00 ceph-mon[51670]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:15:10.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:15:09 vm08 ceph-mon[49535]: pgmap v62: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:15:10.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:15:09 vm08 ceph-mon[49535]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:15:12.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:15:11 vm00 ceph-mon[47364]: pgmap v63: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:15:12.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:15:11 vm00 ceph-mon[51670]: pgmap v63: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:15:12.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:15:11 vm08 ceph-mon[49535]: pgmap v63: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:15:14.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:15:13 vm00 ceph-mon[47364]: pgmap v64: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:15:14.002 
INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:15:13 vm00 ceph-mon[51670]: pgmap v64: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:15:14.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:15:13 vm08 ceph-mon[49535]: pgmap v64: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:15:14.520 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:15:14 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:15:14.146Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"98a3dada-1c81-11f1-89c9-d57c120f78d5\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"192.168.123.108:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T13:15:16.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:15:15 vm00 ceph-mon[47364]: pgmap v65: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:15:16.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:15:15 vm00 ceph-mon[51670]: pgmap v65: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:15:16.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:15:15 vm08 ceph-mon[49535]: pgmap v65: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:15:16.948 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:15:16 vm08 ceph-mon[49535]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:15:17.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:15:16 vm00 ceph-mon[47364]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:15:17.002 
INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:15:16 vm00 ceph-mon[51670]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:15:17.270 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:15:16 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:15:16.947Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm00\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"98a3dada-1c81-11f1-89c9-d57c120f78d5\", domainname=\"(none)\", instance=\"vm00\", job=\"node\", machine=\"x86_64\", nodename=\"vm00\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm00\", job=\"node\", machine=\"x86_64\", nodename=\"vm00\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T13:15:18.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:15:17 vm00 ceph-mon[47364]: pgmap v66: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:15:18.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:15:17 vm00 ceph-mon[51670]: pgmap v66: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:15:18.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:15:17 vm08 ceph-mon[49535]: pgmap v66: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:15:19.520 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:15:19 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:15:19] "GET /metrics HTTP/1.1" 200 37514 "" "Prometheus/2.51.0" 2026-03-10T13:15:20.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:15:19 vm00 ceph-mon[47364]: pgmap v67: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:15:20.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:15:19 vm00 ceph-mon[51670]: pgmap v67: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:15:20.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:15:19 vm08 ceph-mon[49535]: pgmap v67: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:15:22.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:15:21 vm00 ceph-mon[47364]: pgmap v68: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:15:22.002 
INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:15:21 vm00 ceph-mon[51670]: pgmap v68: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:15:22.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:15:21 vm08 ceph-mon[49535]: pgmap v68: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:15:23.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:15:22 vm00 ceph-mon[47364]: pgmap v69: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:15:23.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:15:22 vm00 ceph-mon[51670]: pgmap v69: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:15:23.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:15:22 vm08 ceph-mon[49535]: pgmap v69: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:15:24.520 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:15:24 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:15:24.146Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. 
Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"98a3dada-1c81-11f1-89c9-d57c120f78d5\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"192.168.123.108:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T13:15:25.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:15:24 vm00 ceph-mon[47364]: pgmap v70: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:15:25.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:15:24 vm00 ceph-mon[47364]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:15:25.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:15:24 vm00 ceph-mon[51670]: pgmap v70: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:15:25.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:15:24 vm00 ceph-mon[51670]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:15:25.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:15:24 vm08 ceph-mon[49535]: pgmap v70: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:15:25.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:15:24 vm08 ceph-mon[49535]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:15:26.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:15:25 vm00 ceph-mon[47364]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:15:26.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:15:25 vm00 ceph-mon[51670]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:15:26.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:15:25 vm08 ceph-mon[49535]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:15:27.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:15:27 vm00 ceph-mon[47364]: pgmap v71: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:15:27.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:15:27 vm00 ceph-mon[51670]: pgmap v71: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 
2026-03-10T13:15:27.270 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:15:26 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:15:26.947Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm00\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"98a3dada-1c81-11f1-89c9-d57c120f78d5\", domainname=\"(none)\", instance=\"vm00\", job=\"node\", machine=\"x86_64\", nodename=\"vm00\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm00\", job=\"node\", machine=\"x86_64\", nodename=\"vm00\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T13:15:27.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:15:27 vm08 ceph-mon[49535]: pgmap v71: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:15:29.270 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:15:29 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:15:29] "GET /metrics HTTP/1.1" 200 37515 "" "Prometheus/2.51.0" 2026-03-10T13:15:29.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:15:29 vm08 ceph-mon[49535]: pgmap v72: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:15:29.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:15:29 vm00 ceph-mon[47364]: pgmap v72: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:15:29.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:15:29 vm00 ceph-mon[51670]: pgmap v72: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:15:31.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:15:31 vm08 ceph-mon[49535]: pgmap v73: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:15:31.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:15:31 vm00 ceph-mon[47364]: pgmap v73: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:15:31.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:15:31 vm00 ceph-mon[51670]: pgmap v73: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:15:33.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:15:33 vm08 ceph-mon[49535]: pgmap v74: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:15:33.502 
INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:15:33 vm00 ceph-mon[47364]: pgmap v74: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:15:33.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:15:33 vm00 ceph-mon[51670]: pgmap v74: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:15:34.520 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:15:34 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:15:34.146Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"98a3dada-1c81-11f1-89c9-d57c120f78d5\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"192.168.123.108:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T13:15:35.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:15:35 vm00 ceph-mon[47364]: pgmap v75: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:15:35.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:15:35 vm00 ceph-mon[51670]: pgmap v75: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:15:35.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:15:35 vm08 ceph-mon[49535]: pgmap v75: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:15:36.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:15:36 vm00 ceph-mon[47364]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:15:36.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:15:36 vm00 ceph-mon[51670]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:15:36.520 
INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:15:36 vm08 ceph-mon[49535]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:15:37.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:15:37 vm08 ceph-mon[49535]: pgmap v76: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:15:37.271 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:15:36 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:15:36.947Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm00\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"98a3dada-1c81-11f1-89c9-d57c120f78d5\", domainname=\"(none)\", instance=\"vm00\", job=\"node\", machine=\"x86_64\", nodename=\"vm00\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm00\", job=\"node\", machine=\"x86_64\", nodename=\"vm00\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T13:15:37.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:15:37 vm00 ceph-mon[47364]: pgmap v76: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:15:37.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:15:37 vm00 ceph-mon[51670]: pgmap v76: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:15:39.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:15:39 vm00 ceph-mon[47364]: pgmap v77: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:15:39.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:15:39 vm00 ceph-mon[51670]: pgmap v77: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:15:39.520 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:15:39 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:15:39] "GET /metrics HTTP/1.1" 200 37515 "" "Prometheus/2.51.0" 2026-03-10T13:15:39.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:15:39 vm08 ceph-mon[49535]: pgmap v77: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:15:40.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:15:40 vm00 ceph-mon[47364]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:15:40.502 
INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:15:40 vm00 ceph-mon[51670]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:15:40.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:15:40 vm08 ceph-mon[49535]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:15:41.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:15:41 vm00 ceph-mon[47364]: pgmap v78: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:15:41.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:15:41 vm00 ceph-mon[51670]: pgmap v78: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:15:41.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:15:41 vm08 ceph-mon[49535]: pgmap v78: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:15:43.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:15:43 vm00 ceph-mon[47364]: pgmap v79: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:15:43.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:15:43 vm00 ceph-mon[51670]: pgmap v79: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:15:43.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:15:43 vm08 ceph-mon[49535]: pgmap v79: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:15:44.520 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:15:44 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:15:44.146Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. 
Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"98a3dada-1c81-11f1-89c9-d57c120f78d5\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"192.168.123.108:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T13:15:45.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:15:45 vm00 ceph-mon[47364]: pgmap v80: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:15:45.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:15:45 vm00 ceph-mon[51670]: pgmap v80: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:15:45.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:15:45 vm08 ceph-mon[49535]: pgmap v80: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:15:46.421 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:15:46 vm08 ceph-mon[49535]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:15:46.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:15:46 vm00 ceph-mon[47364]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:15:46.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:15:46 vm00 ceph-mon[51670]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:15:47.220 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:15:47 vm08 ceph-mon[49535]: pgmap v81: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:15:47.220 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:15:47 vm08 ceph-mon[49535]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:15:47.220 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:15:47 vm08 ceph-mon[49535]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:15:47.220 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:15:47 vm08 ceph-mon[49535]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:15:47.221 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:15:47 vm08 ceph-mon[49535]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:15:47.221 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:15:46 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:15:46.947Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" 
rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm00\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"98a3dada-1c81-11f1-89c9-d57c120f78d5\", domainname=\"(none)\", instance=\"vm00\", job=\"node\", machine=\"x86_64\", nodename=\"vm00\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm00\", job=\"node\", machine=\"x86_64\", nodename=\"vm00\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T13:15:47.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:15:47 vm00 ceph-mon[47364]: pgmap v81: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:15:47.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:15:47 vm00 ceph-mon[47364]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:15:47.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:15:47 vm00 ceph-mon[47364]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:15:47.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:15:47 vm00 ceph-mon[47364]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:15:47.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:15:47 vm00 ceph-mon[47364]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:15:47.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:15:47 vm00 ceph-mon[51670]: pgmap v81: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:15:47.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:15:47 vm00 ceph-mon[51670]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:15:47.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:15:47 vm00 ceph-mon[51670]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:15:47.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:15:47 vm00 ceph-mon[51670]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:15:47.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:15:47 vm00 ceph-mon[51670]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:15:49.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:15:48 vm00 ceph-mon[47364]: pgmap v82: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:15:49.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:15:48 vm00 ceph-mon[47364]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:15:49.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:15:48 vm00 ceph-mon[47364]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:15:49.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:15:48 vm00 ceph-mon[47364]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:15:49.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:15:48 vm00 ceph-mon[47364]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:15:49.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:15:48 vm00 
ceph-mon[47364]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:15:49.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:15:48 vm00 ceph-mon[47364]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:15:49.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:15:48 vm00 ceph-mon[47364]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:15:49.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:15:48 vm00 ceph-mon[51670]: pgmap v82: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:15:49.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:15:48 vm00 ceph-mon[51670]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:15:49.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:15:48 vm00 ceph-mon[51670]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:15:49.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:15:48 vm00 ceph-mon[51670]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:15:49.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:15:48 vm00 ceph-mon[51670]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:15:49.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:15:48 vm00 ceph-mon[51670]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:15:49.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:15:48 vm00 ceph-mon[51670]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:15:49.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:15:48 vm00 ceph-mon[51670]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:15:49.271 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:15:49 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:15:49] "GET /metrics HTTP/1.1" 200 37533 "" "Prometheus/2.51.0" 2026-03-10T13:15:49.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:15:48 vm08 ceph-mon[49535]: pgmap v82: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:15:49.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:15:48 vm08 ceph-mon[49535]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:15:49.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:15:48 vm08 ceph-mon[49535]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:15:49.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:15:48 vm08 ceph-mon[49535]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:15:49.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:15:48 vm08 ceph-mon[49535]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:15:49.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:15:48 vm08 ceph-mon[49535]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:15:49.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:15:48 vm08 ceph-mon[49535]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:15:49.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:15:48 vm08 ceph-mon[49535]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:15:51.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:15:50 vm00 ceph-mon[47364]: pgmap v83: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB 
used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:15:51.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:15:50 vm00 ceph-mon[51670]: pgmap v83: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:15:51.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:15:50 vm08 ceph-mon[49535]: pgmap v83: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:15:53.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:15:52 vm00 ceph-mon[47364]: pgmap v84: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:15:53.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:15:52 vm00 ceph-mon[51670]: pgmap v84: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:15:53.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:15:52 vm08 ceph-mon[49535]: pgmap v84: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:15:54.520 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:15:54 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:15:54.146Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. 
Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"98a3dada-1c81-11f1-89c9-d57c120f78d5\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"192.168.123.108:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T13:15:55.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:15:55 vm08 ceph-mon[49535]: pgmap v85: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:15:55.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:15:55 vm08 ceph-mon[49535]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:15:55.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:15:55 vm00 ceph-mon[47364]: pgmap v85: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:15:55.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:15:55 vm00 ceph-mon[47364]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:15:55.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:15:55 vm00 ceph-mon[51670]: pgmap v85: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:15:55.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:15:55 vm00 ceph-mon[51670]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:15:56.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:15:56 vm08 ceph-mon[49535]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:15:56.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:15:56 vm00 ceph-mon[47364]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:15:56.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:15:56 vm00 ceph-mon[51670]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:15:57.164 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:15:56 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:15:56.947Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: 
predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm00\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"98a3dada-1c81-11f1-89c9-d57c120f78d5\", domainname=\"(none)\", instance=\"vm00\", job=\"node\", machine=\"x86_64\", nodename=\"vm00\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm00\", job=\"node\", machine=\"x86_64\", nodename=\"vm00\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T13:15:57.165 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:15:57 vm08 ceph-mon[49535]: pgmap v86: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:15:57.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:15:57 vm00 ceph-mon[47364]: pgmap v86: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:15:57.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:15:57 vm00 ceph-mon[51670]: pgmap v86: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:15:59.270 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:15:59 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:15:59] "GET /metrics HTTP/1.1" 200 37535 "" "Prometheus/2.51.0" 2026-03-10T13:15:59.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:15:59 vm08 ceph-mon[49535]: pgmap v87: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:15:59.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:15:59 vm00 ceph-mon[47364]: pgmap v87: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:15:59.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:15:59 vm00 ceph-mon[51670]: pgmap v87: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:16:01.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:16:01 vm00 ceph-mon[47364]: pgmap v88: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:16:01.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:16:01 vm00 ceph-mon[51670]: pgmap v88: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:16:01.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:16:01 vm08 ceph-mon[49535]: pgmap v88: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:16:03.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:16:03 vm00 ceph-mon[47364]: pgmap v89: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 
2026-03-10T13:16:03.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:16:03 vm00 ceph-mon[51670]: pgmap v89: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:16:03.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:16:03 vm08 ceph-mon[49535]: pgmap v89: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:16:04.520 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:16:04 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:16:04.146Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"98a3dada-1c81-11f1-89c9-d57c120f78d5\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"192.168.123.108:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T13:16:05.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:16:05 vm00 ceph-mon[47364]: pgmap v90: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:16:05.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:16:05 vm00 ceph-mon[51670]: pgmap v90: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:16:05.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:16:05 vm08 ceph-mon[49535]: pgmap v90: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:16:06.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:16:06 vm00 ceph-mon[47364]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:16:06.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:16:06 vm00 ceph-mon[51670]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 
2026-03-10T13:16:06.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:16:06 vm08 ceph-mon[49535]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:16:07.270 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:16:06 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:16:06.947Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm00\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"98a3dada-1c81-11f1-89c9-d57c120f78d5\", domainname=\"(none)\", instance=\"vm00\", job=\"node\", machine=\"x86_64\", nodename=\"vm00\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm00\", job=\"node\", machine=\"x86_64\", nodename=\"vm00\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T13:16:07.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:16:07 vm08 ceph-mon[49535]: pgmap v91: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:16:07.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:16:07 vm00 ceph-mon[47364]: pgmap v91: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:16:07.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:16:07 vm00 ceph-mon[51670]: pgmap v91: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:16:09.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:16:09 vm00 ceph-mon[47364]: pgmap v92: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:16:09.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:16:09 vm00 ceph-mon[51670]: pgmap v92: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:16:09.520 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:16:09 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:16:09] "GET /metrics HTTP/1.1" 200 37535 "" "Prometheus/2.51.0" 2026-03-10T13:16:09.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:16:09 vm08 ceph-mon[49535]: pgmap v92: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:16:10.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:16:10 vm00 ceph-mon[47364]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 
2026-03-10T13:16:10.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:16:10 vm00 ceph-mon[51670]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:16:10.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:16:10 vm08 ceph-mon[49535]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:16:11.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:16:11 vm00 ceph-mon[47364]: pgmap v93: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:16:11.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:16:11 vm00 ceph-mon[51670]: pgmap v93: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:16:11.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:16:11 vm08 ceph-mon[49535]: pgmap v93: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:16:13.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:16:13 vm00 ceph-mon[47364]: pgmap v94: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:16:13.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:16:13 vm00 ceph-mon[51670]: pgmap v94: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:16:13.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:16:13 vm08 ceph-mon[49535]: pgmap v94: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:16:14.520 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:16:14 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:16:14.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. 
Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"98a3dada-1c81-11f1-89c9-d57c120f78d5\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"192.168.123.108:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T13:16:15.441 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:16:15 vm00 ceph-mon[51670]: pgmap v95: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:16:15.441 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:16:15 vm00 ceph-mon[47364]: pgmap v95: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:16:15.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:16:15 vm08 ceph-mon[49535]: pgmap v95: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:16:16.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:16:16 vm00 ceph-mon[47364]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:16:16.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:16:16 vm00 ceph-mon[51670]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:16:16.422 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:16:16 vm08 ceph-mon[49535]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:16:17.270 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:16:16 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:16:16.947Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm00\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"98a3dada-1c81-11f1-89c9-d57c120f78d5\", domainname=\"(none)\", instance=\"vm00\", job=\"node\", 
machine=\"x86_64\", nodename=\"vm00\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm00\", job=\"node\", machine=\"x86_64\", nodename=\"vm00\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T13:16:17.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:16:17 vm08 ceph-mon[49535]: pgmap v96: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:16:17.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:16:17 vm00 ceph-mon[47364]: pgmap v96: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:16:17.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:16:17 vm00 ceph-mon[51670]: pgmap v96: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:16:19.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:16:19 vm00 ceph-mon[47364]: pgmap v97: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:16:19.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:16:19 vm00 ceph-mon[51670]: pgmap v97: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:16:19.520 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:16:19 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:16:19] "GET /metrics HTTP/1.1" 200 37530 "" "Prometheus/2.51.0" 2026-03-10T13:16:19.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:16:19 vm08 ceph-mon[49535]: pgmap v97: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:16:21.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:16:21 vm00 ceph-mon[47364]: pgmap v98: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:16:21.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:16:21 vm00 ceph-mon[51670]: pgmap v98: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:16:21.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:16:21 vm08 ceph-mon[49535]: pgmap v98: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:16:23.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:16:23 vm00 ceph-mon[47364]: pgmap v99: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:16:23.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:16:23 vm00 ceph-mon[51670]: pgmap v99: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:16:23.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:16:23 vm08 ceph-mon[49535]: pgmap v99: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:16:24.384 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df 
-- bash -c 'ceph orch daemon redeploy "mgr.$(ceph mgr dump -f json | jq .standbys | jq .[] | jq -r .name)" --image quay.ceph.io/ceph-ci/ceph:$sha1' 2026-03-10T13:16:24.520 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:16:24 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:16:24.146Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"98a3dada-1c81-11f1-89c9-d57c120f78d5\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"192.168.123.108:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T13:16:25.134 INFO:teuthology.orchestra.run.vm00.stdout:Scheduled to redeploy mgr.y on host 'vm00' 2026-03-10T13:16:25.196 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch ps --refresh' 2026-03-10T13:16:25.356 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:16:25 vm00 ceph-mon[47364]: pgmap v100: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:16:25.357 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:16:25 vm00 ceph-mon[47364]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:16:25.357 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:16:25 vm00 ceph-mon[47364]: from='client.? 
192.168.123.100:0/213559783' entity='client.admin' cmd=[{"prefix": "mgr dump", "format": "json"}]: dispatch 2026-03-10T13:16:25.358 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:16:25 vm00 ceph-mon[51670]: pgmap v100: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:16:25.358 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:16:25 vm00 ceph-mon[51670]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:16:25.358 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:16:25 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/213559783' entity='client.admin' cmd=[{"prefix": "mgr dump", "format": "json"}]: dispatch 2026-03-10T13:16:25.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:16:25 vm08 ceph-mon[49535]: pgmap v100: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:16:25.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:16:25 vm08 ceph-mon[49535]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:16:25.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:16:25 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/213559783' entity='client.admin' cmd=[{"prefix": "mgr dump", "format": "json"}]: dispatch 2026-03-10T13:16:25.691 INFO:teuthology.orchestra.run.vm00.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T13:16:25.691 INFO:teuthology.orchestra.run.vm00.stdout:alertmanager.a vm00 *:9093,9094 running (2m) 37s ago 9m 22.7M - 0.25.0 c8568f914cd2 12fde3cf83cb 2026-03-10T13:16:25.691 INFO:teuthology.orchestra.run.vm00.stdout:grafana.a vm08 *:3000 running (2m) 37s ago 9m 46.6M - dad864ee21e9 263cac442a99 2026-03-10T13:16:25.691 INFO:teuthology.orchestra.run.vm00.stdout:iscsi.foo.vm00.dezodo vm00 running (2m) 37s ago 9m 48.8M - 3.5 e1d6a67b021e 820a5402f9e5 2026-03-10T13:16:25.691 INFO:teuthology.orchestra.run.vm00.stdout:mgr.x vm08 *:8443,9283 running (5m) 37s ago 11m 555M - 19.2.3-678-ge911bdeb 654f31e6858e 62b908c184a8 2026-03-10T13:16:25.691 INFO:teuthology.orchestra.run.vm00.stdout:mgr.y vm00 *:9283 running (11m) 37s ago 11m 418M - 17.2.0 e1d6a67b021e b259475ee6d8 2026-03-10T13:16:25.691 INFO:teuthology.orchestra.run.vm00.stdout:mon.a vm00 running (11m) 37s ago 11m 63.4M 2048M 17.2.0 e1d6a67b021e f0e3f322471c 2026-03-10T13:16:25.691 INFO:teuthology.orchestra.run.vm00.stdout:mon.b vm08 running (11m) 37s ago 11m 44.6M 2048M 17.2.0 e1d6a67b021e d3c1458bc898 2026-03-10T13:16:25.691 INFO:teuthology.orchestra.run.vm00.stdout:mon.c vm00 running (11m) 37s ago 11m 44.3M 2048M 17.2.0 e1d6a67b021e d00b7fd44c23 2026-03-10T13:16:25.691 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.a vm00 *:9100 running (2m) 37s ago 9m 10.5M - 1.7.0 72c9c2088986 bcf883401619 2026-03-10T13:16:25.691 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.b vm08 *:9100 running (2m) 37s ago 9m 9424k - 1.7.0 72c9c2088986 4ac83f03f818 2026-03-10T13:16:25.691 INFO:teuthology.orchestra.run.vm00.stdout:osd.0 vm00 running (10m) 37s ago 10m 50.6M 4096M 17.2.0 e1d6a67b021e 2919c7073fa7 2026-03-10T13:16:25.691 INFO:teuthology.orchestra.run.vm00.stdout:osd.1 vm00 running (10m) 37s ago 10m 55.9M 4096M 17.2.0 e1d6a67b021e 647927dc41ea 2026-03-10T13:16:25.691 INFO:teuthology.orchestra.run.vm00.stdout:osd.2 vm00 running (10m) 37s ago 10m 53.2M 4096M 17.2.0 e1d6a67b021e 
1e417e82c2b9 2026-03-10T13:16:25.691 INFO:teuthology.orchestra.run.vm00.stdout:osd.3 vm00 running (10m) 37s ago 10m 50.3M 4096M 17.2.0 e1d6a67b021e 98bb6d7ea69f 2026-03-10T13:16:25.691 INFO:teuthology.orchestra.run.vm00.stdout:osd.4 vm08 running (10m) 37s ago 10m 51.5M 4096M 17.2.0 e1d6a67b021e e349440ca776 2026-03-10T13:16:25.691 INFO:teuthology.orchestra.run.vm00.stdout:osd.5 vm08 running (10m) 37s ago 10m 53.6M 4096M 17.2.0 e1d6a67b021e 9a6a2a67cc99 2026-03-10T13:16:25.691 INFO:teuthology.orchestra.run.vm00.stdout:osd.6 vm08 running (10m) 37s ago 10m 49.1M 4096M 17.2.0 e1d6a67b021e c27676916d52 2026-03-10T13:16:25.691 INFO:teuthology.orchestra.run.vm00.stdout:osd.7 vm08 running (9m) 37s ago 9m 50.6M 4096M 17.2.0 e1d6a67b021e bf67951990a5 2026-03-10T13:16:25.691 INFO:teuthology.orchestra.run.vm00.stdout:prometheus.a vm08 *:9095 running (2m) 37s ago 9m 52.5M - 2.51.0 1d3b7f56885b 5ef54cde2aad 2026-03-10T13:16:25.691 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm00.tvlvzo vm00 *:8000 running (9m) 37s ago 9m 93.3M - 17.2.0 e1d6a67b021e be24eac16807 2026-03-10T13:16:25.691 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm08.ljayps vm08 *:8000 running (9m) 37s ago 9m 90.8M - 17.2.0 e1d6a67b021e 67b525427823 2026-03-10T13:16:25.734 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'sleep 180' 2026-03-10T13:16:26.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:16:26 vm00 ceph-mon[47364]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:16:26.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:16:26 vm00 ceph-mon[47364]: from='client.15015 -' entity='client.admin' cmd=[{"prefix": "orch daemon redeploy", "name": "mgr.y", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:16:26.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:16:26 vm00 ceph-mon[47364]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:16:26.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:16:26 vm00 ceph-mon[47364]: Schedule redeploy daemon mgr.y 2026-03-10T13:16:26.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:16:26 vm00 ceph-mon[47364]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:16:26.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:16:26 vm00 ceph-mon[47364]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:16:26.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:16:26 vm00 ceph-mon[47364]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:16:26.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:16:26 vm00 ceph-mon[47364]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:16:26.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:16:26 vm00 ceph-mon[47364]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:16:26.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:16:26 vm00 ceph-mon[47364]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-10T13:16:26.253 
INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:16:26 vm00 ceph-mon[47364]: from='mgr.24680 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-10T13:16:26.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:16:26 vm00 ceph-mon[47364]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-10T13:16:26.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:16:26 vm00 ceph-mon[47364]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:16:26.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:16:26 vm00 ceph-mon[47364]: Deploying daemon mgr.y on vm00 2026-03-10T13:16:26.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:16:26 vm00 ceph-mon[47364]: from='client.24859 -' entity='client.admin' cmd=[{"prefix": "orch ps", "refresh": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:16:26.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:16:26 vm00 ceph-mon[51670]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:16:26.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:16:26 vm00 ceph-mon[51670]: from='client.15015 -' entity='client.admin' cmd=[{"prefix": "orch daemon redeploy", "name": "mgr.y", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:16:26.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:16:26 vm00 ceph-mon[51670]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:16:26.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:16:26 vm00 ceph-mon[51670]: Schedule redeploy daemon mgr.y 2026-03-10T13:16:26.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:16:26 vm00 ceph-mon[51670]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:16:26.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:16:26 vm00 ceph-mon[51670]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:16:26.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:16:26 vm00 ceph-mon[51670]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:16:26.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:16:26 vm00 ceph-mon[51670]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:16:26.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:16:26 vm00 ceph-mon[51670]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:16:26.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:16:26 vm00 ceph-mon[51670]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-10T13:16:26.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:16:26 vm00 ceph-mon[51670]: from='mgr.24680 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-10T13:16:26.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:16:26 vm00 ceph-mon[51670]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-10T13:16:26.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:16:26 vm00 
ceph-mon[51670]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:16:26.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:16:26 vm00 ceph-mon[51670]: Deploying daemon mgr.y on vm00 2026-03-10T13:16:26.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:16:26 vm00 ceph-mon[51670]: from='client.24859 -' entity='client.admin' cmd=[{"prefix": "orch ps", "refresh": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:16:26.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:16:26 vm08 ceph-mon[49535]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:16:26.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:16:26 vm08 ceph-mon[49535]: from='client.15015 -' entity='client.admin' cmd=[{"prefix": "orch daemon redeploy", "name": "mgr.y", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:16:26.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:16:26 vm08 ceph-mon[49535]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:16:26.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:16:26 vm08 ceph-mon[49535]: Schedule redeploy daemon mgr.y 2026-03-10T13:16:26.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:16:26 vm08 ceph-mon[49535]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:16:26.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:16:26 vm08 ceph-mon[49535]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:16:26.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:16:26 vm08 ceph-mon[49535]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:16:26.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:16:26 vm08 ceph-mon[49535]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:16:26.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:16:26 vm08 ceph-mon[49535]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:16:26.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:16:26 vm08 ceph-mon[49535]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-10T13:16:26.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:16:26 vm08 ceph-mon[49535]: from='mgr.24680 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-10T13:16:26.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:16:26 vm08 ceph-mon[49535]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-10T13:16:26.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:16:26 vm08 ceph-mon[49535]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:16:26.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:16:26 vm08 ceph-mon[49535]: Deploying daemon mgr.y on vm00 2026-03-10T13:16:26.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:16:26 vm08 ceph-mon[49535]: from='client.24859 -' entity='client.admin' cmd=[{"prefix": "orch ps", "refresh": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:16:27.270 
INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:16:26 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:16:26.947Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm00\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"98a3dada-1c81-11f1-89c9-d57c120f78d5\", domainname=\"(none)\", instance=\"vm00\", job=\"node\", machine=\"x86_64\", nodename=\"vm00\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm00\", job=\"node\", machine=\"x86_64\", nodename=\"vm00\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T13:16:27.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:16:27 vm08 ceph-mon[49535]: pgmap v101: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:16:27.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:16:27 vm00 ceph-mon[51670]: pgmap v101: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:16:27.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:16:27 vm00 ceph-mon[47364]: pgmap v101: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:16:29.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:16:29 vm00 ceph-mon[51670]: pgmap v102: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:16:29.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:16:29 vm00 ceph-mon[47364]: pgmap v102: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:16:29.520 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:16:29 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:16:29] "GET /metrics HTTP/1.1" 200 37534 "" "Prometheus/2.51.0" 2026-03-10T13:16:29.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:16:29 vm08 ceph-mon[49535]: pgmap v102: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:16:31.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:16:31 vm00 ceph-mon[51670]: pgmap v103: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:16:31.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:16:31 vm00 ceph-mon[47364]: pgmap v103: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:16:31.520 
INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:16:31 vm08 ceph-mon[49535]: pgmap v103: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:16:33.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:16:33 vm00 ceph-mon[47364]: pgmap v104: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:16:33.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:16:33 vm00 ceph-mon[51670]: pgmap v104: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:16:33.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:16:33 vm08 ceph-mon[49535]: pgmap v104: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:16:34.520 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:16:34 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:16:34.146Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"98a3dada-1c81-11f1-89c9-d57c120f78d5\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"192.168.123.108:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T13:16:35.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:16:35 vm00 ceph-mon[47364]: pgmap v105: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:16:35.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:16:35 vm00 ceph-mon[51670]: pgmap v105: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:16:35.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:16:35 vm08 ceph-mon[49535]: pgmap v105: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:16:36.503 
INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:16:36 vm00 ceph-mon[47364]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:16:36.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:16:36 vm00 ceph-mon[51670]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:16:36.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:16:36 vm08 ceph-mon[49535]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:16:37.270 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:16:36 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:16:36.947Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm00\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"98a3dada-1c81-11f1-89c9-d57c120f78d5\", domainname=\"(none)\", instance=\"vm00\", job=\"node\", machine=\"x86_64\", nodename=\"vm00\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm00\", job=\"node\", machine=\"x86_64\", nodename=\"vm00\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T13:16:37.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:16:37 vm08 ceph-mon[49535]: pgmap v106: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:16:37.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:16:37 vm00 ceph-mon[47364]: pgmap v106: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:16:37.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:16:37 vm00 ceph-mon[51670]: pgmap v106: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:16:39.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:16:39 vm00 ceph-mon[47364]: pgmap v107: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:16:39.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:16:39 vm00 ceph-mon[47364]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:16:39.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:16:39 vm00 ceph-mon[51670]: pgmap v107: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:16:39.503 
INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:16:39 vm00 ceph-mon[51670]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:16:39.520 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:16:39 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:16:39] "GET /metrics HTTP/1.1" 200 37534 "" "Prometheus/2.51.0" 2026-03-10T13:16:39.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:16:39 vm08 ceph-mon[49535]: pgmap v107: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:16:39.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:16:39 vm08 ceph-mon[49535]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:16:41.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:16:41 vm00 ceph-mon[47364]: pgmap v108: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:16:41.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:16:41 vm00 ceph-mon[51670]: pgmap v108: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:16:41.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:16:41 vm08 ceph-mon[49535]: pgmap v108: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:16:43.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:16:43 vm00 ceph-mon[51670]: pgmap v109: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:16:43.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:16:43 vm00 ceph-mon[47364]: pgmap v109: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:16:43.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:16:43 vm08 ceph-mon[49535]: pgmap v109: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:16:44.520 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:16:44 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:16:44.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. 
Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"98a3dada-1c81-11f1-89c9-d57c120f78d5\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"192.168.123.108:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T13:16:45.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:16:45 vm00 ceph-mon[47364]: pgmap v110: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:16:45.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:16:45 vm00 ceph-mon[51670]: pgmap v110: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:16:45.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:16:45 vm08 ceph-mon[49535]: pgmap v110: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:16:46.466 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:16:46 vm00 ceph-mon[47364]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:16:46.466 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:16:46 vm00 ceph-mon[51670]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:16:46.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:16:46 vm08 ceph-mon[49535]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:16:47.215 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:16:46 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:16:46.947Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm00\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"98a3dada-1c81-11f1-89c9-d57c120f78d5\", domainname=\"(none)\", instance=\"vm00\", job=\"node\", 
machine=\"x86_64\", nodename=\"vm00\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm00\", job=\"node\", machine=\"x86_64\", nodename=\"vm00\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T13:16:47.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:16:47 vm00 ceph-mon[47364]: pgmap v111: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:16:47.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:16:47 vm00 ceph-mon[51670]: pgmap v111: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:16:47.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:16:47 vm08 ceph-mon[49535]: pgmap v111: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:16:49.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:16:49 vm00 ceph-mon[51670]: pgmap v112: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:16:49.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:16:49 vm00 ceph-mon[47364]: pgmap v112: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:16:49.520 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:16:49 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:16:49] "GET /metrics HTTP/1.1" 200 37531 "" "Prometheus/2.51.0" 2026-03-10T13:16:49.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:16:49 vm08 ceph-mon[49535]: pgmap v112: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:16:51.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:16:51 vm00 ceph-mon[47364]: pgmap v113: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:16:51.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:16:51 vm00 ceph-mon[51670]: pgmap v113: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:16:51.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:16:51 vm08 ceph-mon[49535]: pgmap v113: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:16:53.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:16:53 vm00 ceph-mon[51670]: pgmap v114: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:16:53.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:16:53 vm00 ceph-mon[47364]: pgmap v114: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:16:53.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:16:53 vm08 ceph-mon[49535]: pgmap v114: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:16:54.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:16:54 vm00 ceph-mon[47364]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:16:54.503 
INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:16:54 vm00 ceph-mon[51670]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:16:54.520 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:16:54 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:16:54.146Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"98a3dada-1c81-11f1-89c9-d57c120f78d5\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"192.168.123.108:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T13:16:54.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:16:54 vm08 ceph-mon[49535]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:16:55.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:16:55 vm00 ceph-mon[47364]: pgmap v115: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:16:55.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:16:55 vm00 ceph-mon[51670]: pgmap v115: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:16:55.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:16:55 vm08 ceph-mon[49535]: pgmap v115: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:16:56.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:16:56 vm08 ceph-mon[49535]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:16:56.744 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:16:56 vm00 ceph-mon[47364]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 
2026-03-10T13:16:56.744 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:16:56 vm00 ceph-mon[51670]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:16:57.193 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:16:56 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:16:56.947Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm00\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"98a3dada-1c81-11f1-89c9-d57c120f78d5\", domainname=\"(none)\", instance=\"vm00\", job=\"node\", machine=\"x86_64\", nodename=\"vm00\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm00\", job=\"node\", machine=\"x86_64\", nodename=\"vm00\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T13:16:57.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:16:57 vm00 ceph-mon[47364]: pgmap v116: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:16:57.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:16:57 vm00 ceph-mon[51670]: pgmap v116: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:16:57.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:16:57 vm08 ceph-mon[49535]: pgmap v116: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:16:59.520 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:16:59 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:16:59] "GET /metrics HTTP/1.1" 200 37528 "" "Prometheus/2.51.0" 2026-03-10T13:16:59.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:16:59 vm08 ceph-mon[49535]: pgmap v117: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:16:59.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:16:59 vm00 ceph-mon[47364]: pgmap v117: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:16:59.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:16:59 vm00 ceph-mon[51670]: pgmap v117: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:17:01.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:17:01 vm00 ceph-mon[51670]: pgmap v118: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 
2026-03-10T13:17:01.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:17:01 vm00 ceph-mon[47364]: pgmap v118: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:17:01.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:17:01 vm08 ceph-mon[49535]: pgmap v118: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:17:03.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:17:03 vm00 ceph-mon[51670]: pgmap v119: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:17:03.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:17:03 vm00 ceph-mon[47364]: pgmap v119: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:17:03.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:17:03 vm08 ceph-mon[49535]: pgmap v119: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:17:04.520 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:17:04 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:17:04.146Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. 
Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"98a3dada-1c81-11f1-89c9-d57c120f78d5\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"192.168.123.108:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T13:17:05.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:17:05 vm00 ceph-mon[47364]: pgmap v120: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:17:05.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:17:05 vm00 ceph-mon[51670]: pgmap v120: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:17:05.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:17:05 vm08 ceph-mon[49535]: pgmap v120: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:17:06.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:17:06 vm00 ceph-mon[47364]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:17:06.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:17:06 vm00 ceph-mon[51670]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:17:06.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:17:06 vm08 ceph-mon[49535]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:17:07.270 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:17:06 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:17:06.947Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm00\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"98a3dada-1c81-11f1-89c9-d57c120f78d5\", domainname=\"(none)\", instance=\"vm00\", job=\"node\", 
machine=\"x86_64\", nodename=\"vm00\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm00\", job=\"node\", machine=\"x86_64\", nodename=\"vm00\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T13:17:07.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:17:07 vm00 ceph-mon[51670]: pgmap v121: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:17:07.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:17:07 vm00 ceph-mon[47364]: pgmap v121: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:17:07.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:17:07 vm08 ceph-mon[49535]: pgmap v121: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:17:09.520 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:17:09 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:17:09] "GET /metrics HTTP/1.1" 200 37528 "" "Prometheus/2.51.0" 2026-03-10T13:17:09.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:17:09 vm08 ceph-mon[49535]: pgmap v122: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:17:09.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:17:09 vm08 ceph-mon[49535]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:17:09.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:17:09 vm00 ceph-mon[47364]: pgmap v122: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:17:09.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:17:09 vm00 ceph-mon[47364]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:17:09.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:17:09 vm00 ceph-mon[51670]: pgmap v122: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:17:09.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:17:09 vm00 ceph-mon[51670]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:17:11.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:17:11 vm00 ceph-mon[47364]: pgmap v123: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:17:11.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:17:11 vm00 ceph-mon[51670]: pgmap v123: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:17:11.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:17:11 vm08 ceph-mon[49535]: pgmap v123: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:17:13.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:17:13 vm00 ceph-mon[47364]: pgmap v124: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:17:13.753 
INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:17:13 vm00 ceph-mon[51670]: pgmap v124: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:17:13.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:17:13 vm08 ceph-mon[49535]: pgmap v124: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:17:14.520 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:17:14 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:17:14.146Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"98a3dada-1c81-11f1-89c9-d57c120f78d5\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"192.168.123.108:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T13:17:15.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:17:15 vm00 ceph-mon[47364]: pgmap v125: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:17:15.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:17:15 vm00 ceph-mon[51670]: pgmap v125: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:17:15.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:17:15 vm08 ceph-mon[49535]: pgmap v125: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:17:16.671 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:17:16 vm08 ceph-mon[49535]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:17:16.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:17:16 vm00 ceph-mon[47364]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:17:16.752 
INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:17:16 vm00 ceph-mon[51670]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:17:17.270 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:17:16 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:17:16.947Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm00\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"98a3dada-1c81-11f1-89c9-d57c120f78d5\", domainname=\"(none)\", instance=\"vm00\", job=\"node\", machine=\"x86_64\", nodename=\"vm00\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm00\", job=\"node\", machine=\"x86_64\", nodename=\"vm00\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T13:17:17.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:17:17 vm00 ceph-mon[47364]: pgmap v126: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 14 KiB/s rd, 0 B/s wr, 23 op/s 2026-03-10T13:17:17.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:17:17 vm00 ceph-mon[51670]: pgmap v126: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 14 KiB/s rd, 0 B/s wr, 23 op/s 2026-03-10T13:17:17.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:17:17 vm08 ceph-mon[49535]: pgmap v126: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 14 KiB/s rd, 0 B/s wr, 23 op/s 2026-03-10T13:17:19.520 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:17:19 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:17:19] "GET /metrics HTTP/1.1" 200 37534 "" "Prometheus/2.51.0" 2026-03-10T13:17:19.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:17:19 vm08 ceph-mon[49535]: pgmap v127: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 14 KiB/s rd, 0 B/s wr, 22 op/s 2026-03-10T13:17:19.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:17:19 vm00 ceph-mon[47364]: pgmap v127: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 14 KiB/s rd, 0 B/s wr, 22 op/s 2026-03-10T13:17:19.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:17:19 vm00 ceph-mon[51670]: pgmap v127: 161 pgs: 161 active+clean; 457 KiB data, 73 MiB used, 160 GiB / 160 GiB avail; 14 KiB/s rd, 0 B/s wr, 22 op/s 2026-03-10T13:17:21.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:17:21 vm00 ceph-mon[47364]: pgmap v128: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 
GiB avail; 73 KiB/s rd, 0 B/s wr, 120 op/s 2026-03-10T13:17:21.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:17:21 vm00 ceph-mon[51670]: pgmap v128: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 73 KiB/s rd, 0 B/s wr, 120 op/s 2026-03-10T13:17:21.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:17:21 vm08 ceph-mon[49535]: pgmap v128: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 73 KiB/s rd, 0 B/s wr, 120 op/s 2026-03-10T13:17:23.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:17:23 vm00 ceph-mon[47364]: pgmap v129: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 72 KiB/s rd, 0 B/s wr, 119 op/s 2026-03-10T13:17:23.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:17:23 vm00 ceph-mon[51670]: pgmap v129: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 72 KiB/s rd, 0 B/s wr, 119 op/s 2026-03-10T13:17:23.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:17:23 vm08 ceph-mon[49535]: pgmap v129: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 72 KiB/s rd, 0 B/s wr, 119 op/s 2026-03-10T13:17:24.403 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:17:24 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:17:24.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. 
Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"98a3dada-1c81-11f1-89c9-d57c120f78d5\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"192.168.123.108:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T13:17:24.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:17:24 vm00 ceph-mon[47364]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:17:24.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:17:24 vm00 ceph-mon[51670]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:17:24.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:17:24 vm08 ceph-mon[49535]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:17:25.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:17:25 vm00 ceph-mon[47364]: pgmap v130: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 72 KiB/s rd, 0 B/s wr, 119 op/s 2026-03-10T13:17:25.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:17:25 vm00 ceph-mon[51670]: pgmap v130: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 72 KiB/s rd, 0 B/s wr, 119 op/s 2026-03-10T13:17:25.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:17:25 vm08 ceph-mon[49535]: pgmap v130: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 72 KiB/s rd, 0 B/s wr, 119 op/s 2026-03-10T13:17:26.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:17:26 vm00 ceph-mon[47364]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:17:26.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:17:26 vm00 ceph-mon[51670]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:17:26.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:17:26 vm08 ceph-mon[49535]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:17:27.270 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:17:26 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:17:26.947Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: 
CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm00\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"98a3dada-1c81-11f1-89c9-d57c120f78d5\", domainname=\"(none)\", instance=\"vm00\", job=\"node\", machine=\"x86_64\", nodename=\"vm00\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm00\", job=\"node\", machine=\"x86_64\", nodename=\"vm00\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T13:17:27.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:17:27 vm00 ceph-mon[47364]: pgmap v131: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 73 KiB/s rd, 0 B/s wr, 120 op/s 2026-03-10T13:17:27.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:17:27 vm00 ceph-mon[51670]: pgmap v131: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 73 KiB/s rd, 0 B/s wr, 120 op/s 2026-03-10T13:17:27.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:17:27 vm08 ceph-mon[49535]: pgmap v131: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 73 KiB/s rd, 0 B/s wr, 120 op/s 2026-03-10T13:17:29.461 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:17:29 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:17:29] "GET /metrics HTTP/1.1" 200 37536 "" "Prometheus/2.51.0" 2026-03-10T13:17:29.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:17:29 vm00 ceph-mon[47364]: pgmap v132: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 59 KiB/s rd, 0 B/s wr, 97 op/s 2026-03-10T13:17:29.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:17:29 vm00 ceph-mon[51670]: pgmap v132: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 59 KiB/s rd, 0 B/s wr, 97 op/s 2026-03-10T13:17:29.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:17:29 vm08 ceph-mon[49535]: pgmap v132: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 59 KiB/s rd, 0 B/s wr, 97 op/s 2026-03-10T13:17:31.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:17:31 vm00 ceph-mon[47364]: pgmap v133: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 59 KiB/s rd, 0 B/s wr, 98 op/s 2026-03-10T13:17:31.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:17:31 vm00 ceph-mon[51670]: pgmap v133: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 59 KiB/s rd, 0 B/s wr, 98 op/s 2026-03-10T13:17:31.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:17:31 vm08 ceph-mon[49535]: pgmap v133: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 59 KiB/s rd, 0 B/s wr, 98 op/s 2026-03-10T13:17:33.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:17:33 vm00 
ceph-mon[47364]: pgmap v134: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:17:33.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:17:33 vm00 ceph-mon[51670]: pgmap v134: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:17:33.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:17:33 vm08 ceph-mon[49535]: pgmap v134: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:17:34.521 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:17:34 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:17:34.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"98a3dada-1c81-11f1-89c9-d57c120f78d5\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"192.168.123.108:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T13:17:36.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:17:35 vm00 ceph-mon[47364]: pgmap v135: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:17:36.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:17:35 vm00 ceph-mon[51670]: pgmap v135: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:17:36.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:17:35 vm08 ceph-mon[49535]: pgmap v135: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:17:37.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:17:36 vm00 ceph-mon[47364]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:17:37.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:17:36 vm00 ceph-mon[51670]: 
from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:17:37.021 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:17:36 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:17:36.947Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm00\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"98a3dada-1c81-11f1-89c9-d57c120f78d5\", domainname=\"(none)\", instance=\"vm00\", job=\"node\", machine=\"x86_64\", nodename=\"vm00\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm00\", job=\"node\", machine=\"x86_64\", nodename=\"vm00\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T13:17:37.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:17:36 vm08 ceph-mon[49535]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:17:38.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:17:37 vm00 ceph-mon[47364]: pgmap v136: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:17:38.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:17:37 vm00 ceph-mon[51670]: pgmap v136: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:17:38.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:17:37 vm08 ceph-mon[49535]: pgmap v136: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:17:39.520 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:17:39 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:17:39] "GET /metrics HTTP/1.1" 200 37536 "" "Prometheus/2.51.0" 2026-03-10T13:17:40.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:17:39 vm00 ceph-mon[47364]: pgmap v137: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:17:40.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:17:39 vm00 ceph-mon[47364]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:17:40.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:17:39 vm00 ceph-mon[51670]: pgmap v137: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:17:40.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:17:39 vm00 
ceph-mon[51670]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:17:40.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:17:39 vm08 ceph-mon[49535]: pgmap v137: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:17:40.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:17:39 vm08 ceph-mon[49535]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:17:42.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:17:41 vm00 ceph-mon[47364]: pgmap v138: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:17:42.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:17:41 vm00 ceph-mon[51670]: pgmap v138: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:17:42.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:17:41 vm08 ceph-mon[49535]: pgmap v138: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:17:44.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:17:43 vm00 ceph-mon[47364]: pgmap v139: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:17:44.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:17:43 vm00 ceph-mon[51670]: pgmap v139: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:17:44.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:17:43 vm08 ceph-mon[49535]: pgmap v139: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:17:44.520 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:17:44 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:17:44.146Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. 
Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"98a3dada-1c81-11f1-89c9-d57c120f78d5\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"192.168.123.108:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T13:17:46.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:17:45 vm00 ceph-mon[47364]: pgmap v140: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:17:46.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:17:45 vm00 ceph-mon[51670]: pgmap v140: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:17:46.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:17:45 vm08 ceph-mon[49535]: pgmap v140: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:17:47.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:17:46 vm00 ceph-mon[47364]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:17:47.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:17:46 vm00 ceph-mon[51670]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:17:47.020 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:17:46 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:17:46.947Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm00\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"98a3dada-1c81-11f1-89c9-d57c120f78d5\", domainname=\"(none)\", instance=\"vm00\", job=\"node\", machine=\"x86_64\", nodename=\"vm00\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", 
instance=\"vm00\", job=\"node\", machine=\"x86_64\", nodename=\"vm00\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T13:17:47.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:17:46 vm08 ceph-mon[49535]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:17:48.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:17:47 vm00 ceph-mon[47364]: pgmap v141: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:17:48.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:17:47 vm00 ceph-mon[51670]: pgmap v141: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:17:48.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:17:47 vm08 ceph-mon[49535]: pgmap v141: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:17:49.520 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:17:49 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:17:49] "GET /metrics HTTP/1.1" 200 37536 "" "Prometheus/2.51.0" 2026-03-10T13:17:50.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:17:49 vm00 ceph-mon[47364]: pgmap v142: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:17:50.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:17:49 vm00 ceph-mon[51670]: pgmap v142: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:17:50.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:17:49 vm08 ceph-mon[49535]: pgmap v142: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:17:52.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:17:51 vm08 ceph-mon[49535]: pgmap v143: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:17:52.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:17:51 vm00 ceph-mon[47364]: pgmap v143: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:17:52.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:17:51 vm00 ceph-mon[51670]: pgmap v143: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:17:54.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:17:53 vm08 ceph-mon[49535]: pgmap v144: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:17:54.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:17:53 vm00 ceph-mon[47364]: pgmap v144: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:17:54.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:17:53 vm00 ceph-mon[51670]: pgmap v144: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:17:54.520 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:17:54 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:17:54.146Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" 
file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"98a3dada-1c81-11f1-89c9-d57c120f78d5\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"192.168.123.108:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T13:17:55.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:17:54 vm08 ceph-mon[49535]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:17:55.164 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:17:54 vm00 ceph-mon[47364]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:17:55.165 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:17:54 vm00 ceph-mon[51670]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:17:56.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:17:55 vm00 ceph-mon[47364]: pgmap v145: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:17:56.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:17:55 vm00 ceph-mon[51670]: pgmap v145: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:17:56.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:17:55 vm08 ceph-mon[49535]: pgmap v145: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:17:57.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:17:56 vm00 ceph-mon[47364]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:17:57.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:17:56 vm00 ceph-mon[51670]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:17:57.270 
INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:17:56 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:17:56.947Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm00\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"98a3dada-1c81-11f1-89c9-d57c120f78d5\", domainname=\"(none)\", instance=\"vm00\", job=\"node\", machine=\"x86_64\", nodename=\"vm00\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm00\", job=\"node\", machine=\"x86_64\", nodename=\"vm00\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T13:17:57.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:17:56 vm08 ceph-mon[49535]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:17:58.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:17:57 vm00 ceph-mon[47364]: pgmap v146: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:17:58.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:17:57 vm00 ceph-mon[51670]: pgmap v146: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:17:58.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:17:57 vm08 ceph-mon[49535]: pgmap v146: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:17:59.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:17:59 vm00 ceph-mon[47364]: pgmap v147: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:17:59.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:17:59 vm00 ceph-mon[51670]: pgmap v147: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:17:59.520 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:17:59 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:17:59] "GET /metrics HTTP/1.1" 200 37534 "" "Prometheus/2.51.0" 2026-03-10T13:17:59.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:17:59 vm08 ceph-mon[49535]: pgmap v147: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:18:01.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:18:01 vm00 ceph-mon[47364]: pgmap v148: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:18:01.503 
INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:18:01 vm00 ceph-mon[51670]: pgmap v148: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:18:01.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:18:01 vm08 ceph-mon[49535]: pgmap v148: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:18:03.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:18:03 vm00 ceph-mon[47364]: pgmap v149: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:18:03.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:18:03 vm00 ceph-mon[51670]: pgmap v149: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:18:03.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:18:03 vm08 ceph-mon[49535]: pgmap v149: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:18:04.520 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:18:04 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:18:04.146Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. 
Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"98a3dada-1c81-11f1-89c9-d57c120f78d5\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"192.168.123.108:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T13:18:05.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:18:05 vm00 ceph-mon[47364]: pgmap v150: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:18:05.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:18:05 vm00 ceph-mon[51670]: pgmap v150: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:18:05.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:18:05 vm08 ceph-mon[49535]: pgmap v150: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:18:06.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:18:06 vm00 ceph-mon[47364]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:18:06.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:18:06 vm00 ceph-mon[51670]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:18:06.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:18:06 vm08 ceph-mon[49535]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:18:07.270 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:18:06 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:18:06.947Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm00\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"98a3dada-1c81-11f1-89c9-d57c120f78d5\", domainname=\"(none)\", instance=\"vm00\", job=\"node\", 
machine=\"x86_64\", nodename=\"vm00\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm00\", job=\"node\", machine=\"x86_64\", nodename=\"vm00\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T13:18:07.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:18:07 vm00 ceph-mon[47364]: pgmap v151: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:18:07.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:18:07 vm00 ceph-mon[51670]: pgmap v151: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:18:07.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:18:07 vm08 ceph-mon[49535]: pgmap v151: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:18:09.520 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:18:09 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:18:09] "GET /metrics HTTP/1.1" 200 37534 "" "Prometheus/2.51.0" 2026-03-10T13:18:09.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:18:09 vm08 ceph-mon[49535]: pgmap v152: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:18:09.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:18:09 vm08 ceph-mon[49535]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:18:09.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:18:09 vm00 ceph-mon[47364]: pgmap v152: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:18:09.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:18:09 vm00 ceph-mon[47364]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:18:09.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:18:09 vm00 ceph-mon[51670]: pgmap v152: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:18:09.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:18:09 vm00 ceph-mon[51670]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:18:11.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:18:11 vm00 ceph-mon[47364]: pgmap v153: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:18:11.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:18:11 vm00 ceph-mon[51670]: pgmap v153: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:18:11.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:18:11 vm08 ceph-mon[49535]: pgmap v153: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:18:13.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:18:13 vm00 ceph-mon[47364]: pgmap v154: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:18:13.753 
INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:18:13 vm00 ceph-mon[51670]: pgmap v154: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:18:13.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:18:13 vm08 ceph-mon[49535]: pgmap v154: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:18:14.520 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:18:14 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:18:14.146Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"98a3dada-1c81-11f1-89c9-d57c120f78d5\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"192.168.123.108:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T13:18:15.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:18:15 vm00 ceph-mon[47364]: pgmap v155: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:18:15.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:18:15 vm00 ceph-mon[51670]: pgmap v155: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:18:15.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:18:15 vm08 ceph-mon[49535]: pgmap v155: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:18:16.672 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:18:16 vm08 ceph-mon[49535]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:18:16.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:18:16 vm00 ceph-mon[47364]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:18:16.753 
INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:18:16 vm00 ceph-mon[51670]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:18:17.270 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:18:16 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:18:16.947Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm00\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"98a3dada-1c81-11f1-89c9-d57c120f78d5\", domainname=\"(none)\", instance=\"vm00\", job=\"node\", machine=\"x86_64\", nodename=\"vm00\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm00\", job=\"node\", machine=\"x86_64\", nodename=\"vm00\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T13:18:17.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:18:17 vm00 ceph-mon[47364]: pgmap v156: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:18:17.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:18:17 vm00 ceph-mon[51670]: pgmap v156: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:18:17.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:18:17 vm08 ceph-mon[49535]: pgmap v156: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:18:19.520 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:18:19 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:18:19] "GET /metrics HTTP/1.1" 200 37532 "" "Prometheus/2.51.0" 2026-03-10T13:18:19.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:18:19 vm08 ceph-mon[49535]: pgmap v157: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:18:19.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:18:19 vm00 ceph-mon[51670]: pgmap v157: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:18:19.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:18:19 vm00 ceph-mon[47364]: pgmap v157: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:18:21.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:18:21 vm00 ceph-mon[51670]: pgmap v158: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:18:21.752 
INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:18:21 vm00 ceph-mon[47364]: pgmap v158: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:18:21.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:18:21 vm08 ceph-mon[49535]: pgmap v158: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:18:23.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:18:23 vm00 ceph-mon[47364]: pgmap v159: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:18:23.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:18:23 vm00 ceph-mon[51670]: pgmap v159: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:18:23.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:18:23 vm08 ceph-mon[49535]: pgmap v159: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:18:24.521 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:18:24 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:18:24.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. 
Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"98a3dada-1c81-11f1-89c9-d57c120f78d5\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"192.168.123.108:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T13:18:24.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:18:24 vm08 ceph-mon[49535]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:18:24.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:18:24 vm00 ceph-mon[47364]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:18:24.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:18:24 vm00 ceph-mon[51670]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:18:25.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:18:25 vm00 ceph-mon[47364]: pgmap v160: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:18:25.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:18:25 vm00 ceph-mon[51670]: pgmap v160: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:18:25.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:18:25 vm08 ceph-mon[49535]: pgmap v160: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:18:26.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:18:26 vm00 ceph-mon[47364]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:18:26.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:18:26 vm00 ceph-mon[51670]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:18:26.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:18:26 vm08 ceph-mon[49535]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:18:27.270 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:18:26 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:18:26.947Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: 
predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm00\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"98a3dada-1c81-11f1-89c9-d57c120f78d5\", domainname=\"(none)\", instance=\"vm00\", job=\"node\", machine=\"x86_64\", nodename=\"vm00\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm00\", job=\"node\", machine=\"x86_64\", nodename=\"vm00\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T13:18:27.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:18:27 vm00 ceph-mon[47364]: pgmap v161: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:18:27.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:18:27 vm00 ceph-mon[51670]: pgmap v161: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:18:27.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:18:27 vm08 ceph-mon[49535]: pgmap v161: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:18:29.401 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:18:29 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:18:29] "GET /metrics HTTP/1.1" 200 37532 "" "Prometheus/2.51.0" 2026-03-10T13:18:29.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:18:29 vm00 ceph-mon[47364]: pgmap v162: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:18:29.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:18:29 vm00 ceph-mon[51670]: pgmap v162: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:18:29.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:18:29 vm08 ceph-mon[49535]: pgmap v162: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:18:31.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:18:31 vm00 ceph-mon[47364]: pgmap v163: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:18:31.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:18:31 vm00 ceph-mon[51670]: pgmap v163: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:18:31.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:18:31 vm08 ceph-mon[49535]: pgmap v163: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:18:33.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:18:33 vm00 ceph-mon[47364]: pgmap v164: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 
2026-03-10T13:18:33.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:18:33 vm00 ceph-mon[51670]: pgmap v164: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:18:33.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:18:33 vm08 ceph-mon[49535]: pgmap v164: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:18:34.520 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:18:34 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:18:34.146Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"98a3dada-1c81-11f1-89c9-d57c120f78d5\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.100\", device_class=\"hdd\", hostname=\"vm00\", instance=\"192.168.123.108:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.100\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T13:18:35.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:18:35 vm08 ceph-mon[49535]: pgmap v165: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:18:36.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:18:35 vm00 ceph-mon[47364]: pgmap v165: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:18:36.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:18:35 vm00 ceph-mon[51670]: pgmap v165: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:18:36.948 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:18:36 vm08 ceph-mon[49535]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:18:37.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:18:36 vm00 ceph-mon[47364]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 
2026-03-10T13:18:37.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:18:36 vm00 ceph-mon[51670]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:18:37.270 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:18:36 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:18:36.947Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm00\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"98a3dada-1c81-11f1-89c9-d57c120f78d5\", domainname=\"(none)\", instance=\"vm00\", job=\"node\", machine=\"x86_64\", nodename=\"vm00\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm00\", job=\"node\", machine=\"x86_64\", nodename=\"vm00\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T13:18:38.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:18:37 vm00 ceph-mon[47364]: pgmap v166: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:18:38.004 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:18:37 vm00 ceph-mon[51670]: pgmap v166: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:18:38.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:18:37 vm08 ceph-mon[49535]: pgmap v166: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:18:39.520 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:18:39 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:18:39] "GET /metrics HTTP/1.1" 200 37532 "" "Prometheus/2.51.0" 2026-03-10T13:18:40.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:18:39 vm00 ceph-mon[47364]: pgmap v167: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:18:40.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:18:39 vm00 ceph-mon[47364]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:18:40.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:18:39 vm00 ceph-mon[51670]: pgmap v167: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:18:40.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:18:39 vm00 ceph-mon[51670]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": 
"json"}]: dispatch 2026-03-10T13:18:40.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:18:39 vm08 ceph-mon[49535]: pgmap v167: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:18:40.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:18:39 vm08 ceph-mon[49535]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:18:42.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:18:41 vm00 ceph-mon[51670]: pgmap v168: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:18:42.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:18:41 vm00 ceph-mon[47364]: pgmap v168: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:18:42.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:18:41 vm08 ceph-mon[49535]: pgmap v168: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:18:44.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:18:43 vm00 ceph-mon[47364]: pgmap v169: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:18:44.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:18:43 vm00 ceph-mon[51670]: pgmap v169: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:18:44.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:18:43 vm08 ceph-mon[49535]: pgmap v169: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:18:46.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:18:45 vm00 ceph-mon[47364]: pgmap v170: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:18:46.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:18:45 vm00 ceph-mon[51670]: pgmap v170: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:18:46.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:18:45 vm08 ceph-mon[49535]: pgmap v170: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:18:46.922 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:18:46 vm08 ceph-mon[49535]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:18:46.951 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:18:46 vm00 ceph-mon[51670]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:18:46.952 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:18:46 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:18:46.950Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:18:46.952 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:18:46 vm00 ceph-mon[47364]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:18:48.002 
INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:18:47 vm00 ceph-mon[47364]: pgmap v171: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:18:48.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:18:47 vm00 ceph-mon[51670]: pgmap v171: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:18:48.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:18:47 vm08 ceph-mon[49535]: pgmap v171: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:18:49.520 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:18:49 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:18:49] "GET /metrics HTTP/1.1" 200 37532 "" "Prometheus/2.51.0" 2026-03-10T13:18:50.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:18:49 vm00 ceph-mon[47364]: pgmap v172: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:18:50.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:18:49 vm00 ceph-mon[51670]: pgmap v172: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:18:50.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:18:49 vm08 ceph-mon[49535]: pgmap v172: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:18:52.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:18:52 vm00 ceph-mon[51670]: pgmap v173: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:18:52.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:18:52 vm00 ceph-mon[47364]: pgmap v173: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:18:52.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:18:52 vm08 ceph-mon[49535]: pgmap v173: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:18:53.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:18:53 vm00 ceph-mon[51670]: pgmap v174: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:18:53.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:18:53 vm00 ceph-mon[47364]: pgmap v174: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:18:53.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:18:53 vm08 ceph-mon[49535]: pgmap v174: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:18:55.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:18:54 vm00 ceph-mon[51670]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:18:55.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:18:54 vm00 ceph-mon[47364]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:18:55.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:18:54 vm08 ceph-mon[49535]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:18:56.490 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:18:56 vm00 ceph-mon[47364]: pgmap 
v175: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:18:56.490 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:18:56 vm00 ceph-mon[51670]: pgmap v175: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:18:56.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:18:56 vm08 ceph-mon[49535]: pgmap v175: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:18:56.753 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:18:56 vm00 systemd[1]: Stopping Ceph mgr.y for 98a3dada-1c81-11f1-89c9-d57c120f78d5... 2026-03-10T13:18:56.753 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:18:56 vm00 podman[81384]: 2026-03-10 13:18:56.61029329 +0000 UTC m=+0.054566628 container died b259475ee6d8462f0568e99801cc5689c17e3b4f7cee80c6b719f47483548123 (image=quay.io/ceph/ceph:v17.2.0, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, ceph=True, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, io.openshift.tags=base centos centos-stream, GIT_BRANCH=HEAD, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, release=754, io.openshift.expose-services=, vcs-type=git, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, com.redhat.component=centos-stream-container, vendor=Red Hat, Inc., CEPH_POINT_RELEASE=-17.2.0, version=8, architecture=x86_64, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, distribution-scope=public, RELEASE=HEAD, build-date=2022-05-03T08:36:31.336870, GIT_REPO=https://github.com/ceph/ceph-container.git, io.k8s.display-name=CentOS Stream 8, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., maintainer=Guillaume Abrioux , name=centos-stream, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, GIT_CLEAN=True, io.buildah.version=1.19.8) 2026-03-10T13:18:56.753 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:18:56 vm00 podman[81384]: 2026-03-10 13:18:56.640105182 +0000 UTC m=+0.084378520 container remove b259475ee6d8462f0568e99801cc5689c17e3b4f7cee80c6b719f47483548123 (image=quay.io/ceph/ceph:v17.2.0, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, io.k8s.display-name=CentOS Stream 8, RELEASE=HEAD, vcs-type=git, distribution-scope=public, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, io.openshift.expose-services=, vendor=Red Hat, Inc., url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, release=754, version=8, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. 
This image takes the Red Hat UBI and layers on content from CentOS Stream, CEPH_POINT_RELEASE=-17.2.0, architecture=x86_64, GIT_BRANCH=HEAD, GIT_CLEAN=True, GIT_REPO=https://github.com/ceph/ceph-container.git, build-date=2022-05-03T08:36:31.336870, ceph=True, com.redhat.component=centos-stream-container, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., com.redhat.license_terms=https://centos.org/legal/licensing-policy/, maintainer=Guillaume Abrioux , name=centos-stream, io.buildah.version=1.19.8, io.openshift.tags=base centos centos-stream) 2026-03-10T13:18:56.753 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:18:56 vm00 bash[81384]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y 2026-03-10T13:18:56.753 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:18:56 vm00 systemd[1]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@mgr.y.service: Main process exited, code=exited, status=143/n/a 2026-03-10T13:18:56.753 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:18:56 vm00 systemd[1]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@mgr.y.service: Failed with result 'exit-code'. 2026-03-10T13:18:56.753 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:18:56 vm00 systemd[1]: Stopped Ceph mgr.y for 98a3dada-1c81-11f1-89c9-d57c120f78d5. 2026-03-10T13:18:56.753 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:18:56 vm00 systemd[1]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@mgr.y.service: Consumed 36.129s CPU time. 2026-03-10T13:18:57.026 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:18:56 vm00 systemd[1]: Starting Ceph mgr.y for 98a3dada-1c81-11f1-89c9-d57c120f78d5... 
2026-03-10T13:18:57.026 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:18:56 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:18:56.949Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:18:57.026 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:18:56 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:18:56.950Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:18:57.505 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:18:57 vm00 ceph-mon[47364]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:18:57.505 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:18:57 vm00 ceph-mon[47364]: pgmap v176: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:18:57.505 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:18:57 vm00 ceph-mon[47364]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:18:57.505 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:18:57 vm00 ceph-mon[47364]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:18:57.505 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:18:57 vm00 ceph-mon[47364]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:18:57.505 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:18:57 vm00 ceph-mon[47364]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:18:57.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:18:57 vm00 ceph-mon[51670]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:18:57.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:18:57 vm00 ceph-mon[51670]: pgmap v176: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:18:57.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:18:57 vm00 ceph-mon[51670]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:18:57.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:18:57 vm00 ceph-mon[51670]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:18:57.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:18:57 vm00 ceph-mon[51670]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:18:57.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:18:57 vm00 ceph-mon[51670]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:18:57.505 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:18:57 vm00 podman[81494]: 2026-03-10 13:18:57.025385248 +0000 UTC m=+0.016737275 container create 5bc576d4d32be55f1074bb50ab6e5fff01b91e80bb3eb41cd22ab252461820b8 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, 
org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/) 2026-03-10T13:18:57.505 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:18:57 vm00 podman[81494]: 2026-03-10 13:18:57.076443697 +0000 UTC m=+0.067795734 container init 5bc576d4d32be55f1074bb50ab6e5fff01b91e80bb3eb41cd22ab252461820b8 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223, ceph=True, OSD_FLAVOR=default, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid) 2026-03-10T13:18:57.505 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:18:57 vm00 podman[81494]: 2026-03-10 13:18:57.08065468 +0000 UTC m=+0.072006707 container start 5bc576d4d32be55f1074bb50ab6e5fff01b91e80bb3eb41cd22ab252461820b8 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.41.3, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, CEPH_REF=squid, org.label-schema.build-date=20260223, OSD_FLAVOR=default) 2026-03-10T13:18:57.506 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:18:57 vm00 bash[81494]: 5bc576d4d32be55f1074bb50ab6e5fff01b91e80bb3eb41cd22ab252461820b8 2026-03-10T13:18:57.506 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:18:57 vm00 podman[81494]: 2026-03-10 13:18:57.017960408 +0000 UTC m=+0.009312446 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:18:57.506 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:18:57 vm00 systemd[1]: Started Ceph mgr.y for 98a3dada-1c81-11f1-89c9-d57c120f78d5. 
2026-03-10T13:18:57.506 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:18:57 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:18:57.211+0000 7fea49343140 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member 2026-03-10T13:18:57.506 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:18:57 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:18:57.263+0000 7fea49343140 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member 2026-03-10T13:18:57.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:18:57 vm08 ceph-mon[49535]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:18:57.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:18:57 vm08 ceph-mon[49535]: pgmap v176: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:18:57.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:18:57 vm08 ceph-mon[49535]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:18:57.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:18:57 vm08 ceph-mon[49535]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:18:57.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:18:57 vm08 ceph-mon[49535]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:18:57.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:18:57 vm08 ceph-mon[49535]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:18:57.870 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:18:57 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:18:57.868+0000 7fea49343140 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-10T13:18:58.592 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:18:58 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:18:58.316+0000 7fea49343140 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member 2026-03-10T13:18:58.592 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:18:58 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: /lib64/python3.9/site-packages/scipy/__init__.py:73: UserWarning: NumPy was imported from a Python sub-interpreter but NumPy does not properly support sub-interpreters. This will likely work for most users but might cause hard to track down issues or subtle bugs. A common user of the rare sub-interpreter feature is wsgi which also allows single-interpreter mode. 2026-03-10T13:18:58.592 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:18:58 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: Improvements in the case of bugs are welcome, but is not on the NumPy roadmap, and full support may require significant effort to achieve. 
2026-03-10T13:18:58.592 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:18:58 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: from numpy import show_config as show_numpy_config 2026-03-10T13:18:58.592 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:18:58 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:18:58.439+0000 7fea49343140 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member 2026-03-10T13:18:58.592 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:18:58 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:18:58.485+0000 7fea49343140 -1 mgr[py] Module influx has missing NOTIFY_TYPES member 2026-03-10T13:18:58.849 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:18:58 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:18:58.632+0000 7fea49343140 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member 2026-03-10T13:18:59.521 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:18:59 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:18:59] "GET /metrics HTTP/1.1" 200 37532 "" "Prometheus/2.51.0" 2026-03-10T13:18:59.814 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:18:59 vm00 ceph-mon[47364]: pgmap v177: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:18:59.814 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:18:59 vm00 ceph-mon[47364]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:18:59.814 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:18:59 vm00 ceph-mon[47364]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:18:59.814 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:18:59 vm00 ceph-mon[47364]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:18:59.814 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:18:59 vm00 ceph-mon[47364]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:18:59.814 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:18:59 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:18:59.663+0000 7fea49343140 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-10T13:18:59.815 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:18:59 vm00 ceph-mon[51670]: pgmap v177: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:18:59.815 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:18:59 vm00 ceph-mon[51670]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:18:59.815 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:18:59 vm00 ceph-mon[51670]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:18:59.815 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:18:59 vm00 ceph-mon[51670]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:18:59.815 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:18:59 vm00 ceph-mon[51670]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:19:00.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:18:59 vm08 ceph-mon[49535]: pgmap v177: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:19:00.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:18:59 vm08 ceph-mon[49535]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:19:00.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:18:59 vm08 ceph-mon[49535]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:19:00.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:18:59 vm08 ceph-mon[49535]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:19:00.021 
INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:18:59 vm08 ceph-mon[49535]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:19:00.176 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:18:59 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:18:59.813+0000 7fea49343140 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-10T13:19:00.176 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:18:59 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:18:59.860+0000 7fea49343140 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 2026-03-10T13:19:00.176 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:18:59 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:18:59.899+0000 7fea49343140 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-10T13:19:00.176 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:18:59 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:18:59.946+0000 7fea49343140 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 2026-03-10T13:19:00.176 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:18:59 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:18:59.990+0000 7fea49343140 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-10T13:19:00.502 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:00 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:19:00.175+0000 7fea49343140 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member 2026-03-10T13:19:00.502 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:00 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:19:00.241+0000 7fea49343140 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-10T13:19:00.827 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:00 vm00 ceph-mon[47364]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:19:00.828 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:00 vm00 ceph-mon[47364]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:19:00.828 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:00 vm00 ceph-mon[47364]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:19:00.828 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:00 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:19:00.508+0000 7fea49343140 -1 mgr[py] Module rgw has missing NOTIFY_TYPES member 2026-03-10T13:19:00.828 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:00 vm00 ceph-mon[51670]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:19:00.828 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:00 vm00 ceph-mon[51670]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:19:00.828 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:00 vm00 ceph-mon[51670]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:19:01.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:00 vm08 ceph-mon[49535]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:19:01.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:00 vm08 ceph-mon[49535]: from='mgr.24680 
192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:19:01.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:00 vm08 ceph-mon[49535]: from='mgr.24680 ' entity='mgr.x' 2026-03-10T13:19:01.131 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:00 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:19:00.827+0000 7fea49343140 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-10T13:19:01.131 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:00 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:19:00.868+0000 7fea49343140 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member 2026-03-10T13:19:01.131 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:00 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:19:00.912+0000 7fea49343140 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-10T13:19:01.131 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:01 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:19:01.001+0000 7fea49343140 -1 mgr[py] Module status has missing NOTIFY_TYPES member 2026-03-10T13:19:01.131 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:01 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:19:01.044+0000 7fea49343140 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 2026-03-10T13:19:01.424 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:01 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:19:01.130+0000 7fea49343140 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member 2026-03-10T13:19:01.424 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:01 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:19:01.254+0000 7fea49343140 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member 2026-03-10T13:19:01.752 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:01 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:19:01.423+0000 7fea49343140 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-10T13:19:01.753 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:01 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:19:01.466+0000 7fea49343140 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-10T13:19:01.753 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:01 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: [10/Mar/2026:13:19:01] ENGINE Bus STARTING 2026-03-10T13:19:01.753 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:01 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: CherryPy Checker: 2026-03-10T13:19:01.753 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:01 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: The Application mounted at '' has an empty config. 
2026-03-10T13:19:01.753 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:01 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:19:01.753 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:01 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: [10/Mar/2026:13:19:01] ENGINE Serving on http://:::9283 2026-03-10T13:19:01.753 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:01 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: [10/Mar/2026:13:19:01] ENGINE Bus STARTED 2026-03-10T13:19:02.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:02 vm00 ceph-mon[47364]: pgmap v178: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:19:02.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:02 vm00 ceph-mon[47364]: Standby manager daemon y restarted 2026-03-10T13:19:02.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:02 vm00 ceph-mon[47364]: Standby manager daemon y started 2026-03-10T13:19:02.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:02 vm00 ceph-mon[47364]: from='mgr.? 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/crt"}]: dispatch 2026-03-10T13:19:02.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:02 vm00 ceph-mon[47364]: from='mgr.? 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-10T13:19:02.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:02 vm00 ceph-mon[47364]: from='mgr.? 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/key"}]: dispatch 2026-03-10T13:19:02.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:02 vm00 ceph-mon[47364]: from='mgr.? 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-10T13:19:02.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:02 vm00 ceph-mon[51670]: pgmap v178: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:19:02.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:02 vm00 ceph-mon[51670]: Standby manager daemon y restarted 2026-03-10T13:19:02.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:02 vm00 ceph-mon[51670]: Standby manager daemon y started 2026-03-10T13:19:02.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:02 vm00 ceph-mon[51670]: from='mgr.? 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/crt"}]: dispatch 2026-03-10T13:19:02.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:02 vm00 ceph-mon[51670]: from='mgr.? 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-10T13:19:02.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:02 vm00 ceph-mon[51670]: from='mgr.? 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/key"}]: dispatch 2026-03-10T13:19:02.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:02 vm00 ceph-mon[51670]: from='mgr.? 
192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-10T13:19:02.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:02 vm08 ceph-mon[49535]: pgmap v178: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:19:02.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:02 vm08 ceph-mon[49535]: Standby manager daemon y restarted 2026-03-10T13:19:02.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:02 vm08 ceph-mon[49535]: Standby manager daemon y started 2026-03-10T13:19:02.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:02 vm08 ceph-mon[49535]: from='mgr.? 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/crt"}]: dispatch 2026-03-10T13:19:02.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:02 vm08 ceph-mon[49535]: from='mgr.? 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-10T13:19:02.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:02 vm08 ceph-mon[49535]: from='mgr.? 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/key"}]: dispatch 2026-03-10T13:19:02.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:02 vm08 ceph-mon[49535]: from='mgr.? 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-10T13:19:03.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:03 vm00 ceph-mon[47364]: pgmap v179: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:19:03.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:03 vm00 ceph-mon[47364]: mgrmap e27: x(active, since 5m), standbys: y 2026-03-10T13:19:03.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:03 vm00 ceph-mon[51670]: pgmap v179: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:19:03.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:03 vm00 ceph-mon[51670]: mgrmap e27: x(active, since 5m), standbys: y 2026-03-10T13:19:03.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:03 vm08 ceph-mon[49535]: pgmap v179: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:19:03.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:03 vm08 ceph-mon[49535]: mgrmap e27: x(active, since 5m), standbys: y 2026-03-10T13:19:05.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:05 vm00 ceph-mon[47364]: pgmap v180: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:19:05.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:05 vm00 ceph-mon[51670]: pgmap v180: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:19:05.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:05 vm08 ceph-mon[49535]: pgmap v180: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:19:06.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:06 vm00 ceph-mon[47364]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:19:06.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:06 vm00 ceph-mon[51670]: 
from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:19:06.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:06 vm08 ceph-mon[49535]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:19:07.253 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:19:06 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:19:06.950Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:19:07.253 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:19:06 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:19:06.950Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:19:07.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:07 vm00 ceph-mon[47364]: pgmap v181: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:19:07.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:07 vm00 ceph-mon[51670]: pgmap v181: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:19:07.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:07 vm08 ceph-mon[49535]: pgmap v181: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:19:09.425 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:19:09 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:19:09] "GET /metrics HTTP/1.1" 200 37532 "" "Prometheus/2.51.0" 2026-03-10T13:19:09.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:09 vm00 ceph-mon[51670]: pgmap v182: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:19:09.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:09 vm00 ceph-mon[51670]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:19:09.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:09 vm00 ceph-mon[47364]: pgmap v182: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:19:09.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:09 vm00 ceph-mon[47364]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:19:09.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:09 vm08 ceph-mon[49535]: pgmap v182: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:19:09.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:09 vm08 ceph-mon[49535]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 
2026-03-10T13:19:11.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:11 vm00 ceph-mon[47364]: pgmap v183: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:19:11.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:11 vm00 ceph-mon[51670]: pgmap v183: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:19:11.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:11 vm08 ceph-mon[49535]: pgmap v183: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:19:13.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:13 vm00 ceph-mon[47364]: pgmap v184: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:19:13.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:13 vm00 ceph-mon[51670]: pgmap v184: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:19:13.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:13 vm08 ceph-mon[49535]: pgmap v184: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:19:15.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:15 vm00 ceph-mon[47364]: pgmap v185: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:19:15.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:15 vm00 ceph-mon[51670]: pgmap v185: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:19:15.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:15 vm08 ceph-mon[49535]: pgmap v185: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:19:16.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:16 vm00 ceph-mon[47364]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:19:16.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:16 vm00 ceph-mon[51670]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:19:16.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:16 vm08 ceph-mon[49535]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:19:17.252 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:19:16 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:19:16.950Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:19:17.253 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:19:16 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:19:16.951Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 
192.168.123.1:53: no such host" 2026-03-10T13:19:17.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:17 vm00 ceph-mon[47364]: pgmap v186: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:19:17.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:17 vm00 ceph-mon[51670]: pgmap v186: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:19:17.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:17 vm08 ceph-mon[49535]: pgmap v186: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:19:19.452 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:19:19 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:19:19] "GET /metrics HTTP/1.1" 200 37532 "" "Prometheus/2.51.0" 2026-03-10T13:19:19.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:19 vm00 ceph-mon[47364]: pgmap v187: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:19:19.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:19 vm00 ceph-mon[51670]: pgmap v187: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:19:19.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:19 vm08 ceph-mon[49535]: pgmap v187: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:19:21.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:21 vm00 ceph-mon[47364]: pgmap v188: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:19:21.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:21 vm00 ceph-mon[51670]: pgmap v188: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:19:21.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:21 vm08 ceph-mon[49535]: pgmap v188: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:19:23.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:23 vm00 ceph-mon[47364]: pgmap v189: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:19:23.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:23 vm00 ceph-mon[51670]: pgmap v189: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:19:23.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:23 vm08 ceph-mon[49535]: pgmap v189: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:19:24.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:24 vm00 ceph-mon[47364]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:19:24.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:24 vm00 ceph-mon[51670]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:19:24.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:24 vm08 ceph-mon[49535]: from='mgr.24680 192.168.123.108:0/229555541' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:19:25.502 
INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:25 vm00 ceph-mon[47364]: pgmap v190: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T13:19:25.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:25 vm00 ceph-mon[51670]: pgmap v190: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T13:19:25.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:25 vm08 ceph-mon[49535]: pgmap v190: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T13:19:26.079 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch ps'
2026-03-10T13:19:26.510 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:26 vm00 ceph-mon[47364]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T13:19:26.510 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:26 vm00 ceph-mon[51670]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T13:19:26.721 INFO:teuthology.orchestra.run.vm00.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T13:19:26.721 INFO:teuthology.orchestra.run.vm00.stdout:alertmanager.a vm00 *:9093,9094 running (5m) 27s ago 12m 22.7M - 0.25.0 c8568f914cd2 12fde3cf83cb
2026-03-10T13:19:26.721 INFO:teuthology.orchestra.run.vm00.stdout:grafana.a vm08 *:3000 running (5m) 28s ago 12m 46.8M - dad864ee21e9 263cac442a99
2026-03-10T13:19:26.721 INFO:teuthology.orchestra.run.vm00.stdout:iscsi.foo.vm00.dezodo vm00 running (5m) 27s ago 12m 49.3M - 3.5 e1d6a67b021e 820a5402f9e5
2026-03-10T13:19:26.721 INFO:teuthology.orchestra.run.vm00.stdout:mgr.x vm08 *:8443,9283 running (8m) 28s ago 14m 561M - 19.2.3-678-ge911bdeb 654f31e6858e 62b908c184a8
2026-03-10T13:19:26.721 INFO:teuthology.orchestra.run.vm00.stdout:mgr.y vm00 *:8443,9283,8765 running (29s) 27s ago 14m 58.5M - 19.2.3-678-ge911bdeb 654f31e6858e 5bc576d4d32b
2026-03-10T13:19:26.721 INFO:teuthology.orchestra.run.vm00.stdout:mon.a vm00 running (14m) 27s ago 15m 63.6M 2048M 17.2.0 e1d6a67b021e f0e3f322471c
2026-03-10T13:19:26.721 INFO:teuthology.orchestra.run.vm00.stdout:mon.b vm08 running (14m) 28s ago 14m 45.0M 2048M 17.2.0 e1d6a67b021e d3c1458bc898
2026-03-10T13:19:26.721 INFO:teuthology.orchestra.run.vm00.stdout:mon.c vm00 running (14m) 27s ago 14m 45.6M 2048M 17.2.0 e1d6a67b021e d00b7fd44c23
2026-03-10T13:19:26.721 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.a vm00 *:9100 running (5m) 27s ago 12m 10.4M - 1.7.0 72c9c2088986 bcf883401619
2026-03-10T13:19:26.721 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.b vm08 *:9100 running (5m) 28s ago 12m 9.99M - 1.7.0 72c9c2088986 4ac83f03f818
2026-03-10T13:19:26.721 INFO:teuthology.orchestra.run.vm00.stdout:osd.0 vm00 running (13m) 27s ago 13m 51.5M 4096M 17.2.0 e1d6a67b021e 2919c7073fa7
2026-03-10T13:19:26.721 INFO:teuthology.orchestra.run.vm00.stdout:osd.1 vm00 running (13m) 27s ago 13m 57.0M 4096M 17.2.0 e1d6a67b021e 647927dc41ea
2026-03-10T13:19:26.721 INFO:teuthology.orchestra.run.vm00.stdout:osd.2 vm00 running (13m) 27s ago 13m 53.9M 4096M 17.2.0 e1d6a67b021e 1e417e82c2b9
2026-03-10T13:19:26.721 INFO:teuthology.orchestra.run.vm00.stdout:osd.3 vm00 running (13m) 27s ago 13m 52.0M 4096M 17.2.0 e1d6a67b021e 98bb6d7ea69f
2026-03-10T13:19:26.721 INFO:teuthology.orchestra.run.vm00.stdout:osd.4 vm08 running (13m) 28s ago 13m 53.8M 4096M 17.2.0 e1d6a67b021e e349440ca776
2026-03-10T13:19:26.721 INFO:teuthology.orchestra.run.vm00.stdout:osd.5 vm08 running (13m) 28s ago 13m 54.9M 4096M 17.2.0 e1d6a67b021e 9a6a2a67cc99
2026-03-10T13:19:26.721 INFO:teuthology.orchestra.run.vm00.stdout:osd.6 vm08 running (13m) 28s ago 13m 50.5M 4096M 17.2.0 e1d6a67b021e c27676916d52
2026-03-10T13:19:26.721 INFO:teuthology.orchestra.run.vm00.stdout:osd.7 vm08 running (12m) 28s ago 12m 52.2M 4096M 17.2.0 e1d6a67b021e bf67951990a5
2026-03-10T13:19:26.721 INFO:teuthology.orchestra.run.vm00.stdout:prometheus.a vm08 *:9095 running (5m) 28s ago 12m 52.6M - 2.51.0 1d3b7f56885b 5ef54cde2aad
2026-03-10T13:19:26.722 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm00.tvlvzo vm00 *:8000 running (12m) 27s ago 12m 93.9M - 17.2.0 e1d6a67b021e be24eac16807
2026-03-10T13:19:26.722 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm08.ljayps vm08 *:8000 running (12m) 28s ago 12m 91.5M - 17.2.0 e1d6a67b021e 67b525427823
2026-03-10T13:19:26.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:26 vm08 ceph-mon[49535]: from='client.24790 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T13:19:26.803 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph versions'
2026-03-10T13:19:27.031 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:19:26 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:19:26.951Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T13:19:27.031 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:19:26 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:19:26.953Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-10T13:19:27.388 INFO:teuthology.orchestra.run.vm00.stdout:{
2026-03-10T13:19:27.388 INFO:teuthology.orchestra.run.vm00.stdout: "mon": {
2026-03-10T13:19:27.388 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 3
2026-03-10T13:19:27.388 INFO:teuthology.orchestra.run.vm00.stdout: },
2026-03-10T13:19:27.388 INFO:teuthology.orchestra.run.vm00.stdout: "mgr": {
2026-03-10T13:19:27.388 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2
2026-03-10T13:19:27.388 INFO:teuthology.orchestra.run.vm00.stdout: },
2026-03-10T13:19:27.388 INFO:teuthology.orchestra.run.vm00.stdout: "osd": {
2026-03-10T13:19:27.388 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 8
2026-03-10T13:19:27.388 INFO:teuthology.orchestra.run.vm00.stdout: },
2026-03-10T13:19:27.388 INFO:teuthology.orchestra.run.vm00.stdout: "mds": {},
2026-03-10T13:19:27.388 INFO:teuthology.orchestra.run.vm00.stdout: "rgw": {
2026-03-10T13:19:27.388 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 2
2026-03-10T13:19:27.388 INFO:teuthology.orchestra.run.vm00.stdout: },
2026-03-10T13:19:27.388 INFO:teuthology.orchestra.run.vm00.stdout: "overall": {
2026-03-10T13:19:27.388 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 13,
2026-03-10T13:19:27.388 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2
2026-03-10T13:19:27.388 INFO:teuthology.orchestra.run.vm00.stdout: }
2026-03-10T13:19:27.388 INFO:teuthology.orchestra.run.vm00.stdout:}
2026-03-10T13:19:28.336 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph health detail'
2026-03-10T13:19:28.582 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:28 vm00 ceph-mon[47364]: pgmap v191: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T13:19:28.582 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:28 vm00 ceph-mon[47364]: from='client.15033 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T13:19:28.582 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:28 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/1138404755' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T13:19:28.582 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:28 vm00 ceph-mon[51670]: pgmap v191: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T13:19:28.582 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:28 vm00 ceph-mon[51670]: from='client.15033 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T13:19:28.582 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:28 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/1138404755' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T13:19:28.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:28 vm08 ceph-mon[49535]: pgmap v191: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-10T13:19:28.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:28 vm08 ceph-mon[49535]: from='client.15033 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T13:19:28.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:28 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/1138404755' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T13:19:29.100 INFO:teuthology.orchestra.run.vm00.stdout:HEALTH_OK
2026-03-10T13:19:29.181 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph -s'
2026-03-10T13:19:29.520 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:19:29 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:19:29] "GET /metrics HTTP/1.1" 200 37530 "" "Prometheus/2.51.0"
2026-03-10T13:19:29.821 INFO:teuthology.orchestra.run.vm00.stdout: cluster:
2026-03-10T13:19:29.821 INFO:teuthology.orchestra.run.vm00.stdout: id: 98a3dada-1c81-11f1-89c9-d57c120f78d5
2026-03-10T13:19:29.821 INFO:teuthology.orchestra.run.vm00.stdout: health: HEALTH_OK
2026-03-10T13:19:29.821 INFO:teuthology.orchestra.run.vm00.stdout:
2026-03-10T13:19:29.821 INFO:teuthology.orchestra.run.vm00.stdout: services:
2026-03-10T13:19:29.821 INFO:teuthology.orchestra.run.vm00.stdout: mon: 3 daemons, quorum a,c,b (age 14m)
2026-03-10T13:19:29.821 INFO:teuthology.orchestra.run.vm00.stdout: mgr: x(active, since 6m), standbys: y
2026-03-10T13:19:29.821 INFO:teuthology.orchestra.run.vm00.stdout: osd: 8 osds: 8 up (since 12m), 8 in (since 13m)
2026-03-10T13:19:29.821 INFO:teuthology.orchestra.run.vm00.stdout: rgw: 2 daemons active (2 hosts, 1 zones)
2026-03-10T13:19:29.821 INFO:teuthology.orchestra.run.vm00.stdout:
2026-03-10T13:19:29.821 INFO:teuthology.orchestra.run.vm00.stdout: data:
2026-03-10T13:19:29.821 INFO:teuthology.orchestra.run.vm00.stdout: pools: 6 pools, 161 pgs
2026-03-10T13:19:29.821 INFO:teuthology.orchestra.run.vm00.stdout: objects: 209 objects, 457 KiB
2026-03-10T13:19:29.821 INFO:teuthology.orchestra.run.vm00.stdout: usage: 95 MiB used, 160 GiB / 160 GiB avail
2026-03-10T13:19:29.821 INFO:teuthology.orchestra.run.vm00.stdout: pgs: 161 active+clean
2026-03-10T13:19:29.821 INFO:teuthology.orchestra.run.vm00.stdout:
2026-03-10T13:19:29.821 INFO:teuthology.orchestra.run.vm00.stdout: io:
2026-03-10T13:19:29.821 INFO:teuthology.orchestra.run.vm00.stdout: client: 853 B/s rd, 0 op/s rd, 0 op/s wr
2026-03-10T13:19:29.821 INFO:teuthology.orchestra.run.vm00.stdout:
2026-03-10T13:19:29.903 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:29 vm00 ceph-mon[47364]: pgmap v192: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T13:19:29.903 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:29 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/2905262425' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T13:19:29.904 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:29 vm00 ceph-mon[51670]: pgmap v192: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-10T13:19:29.904 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:29 vm00 ceph-mon[51670]: from='client.? 
192.168.123.100:0/2905262425' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T13:19:29.906 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph mgr fail' 2026-03-10T13:19:30.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:29 vm08 ceph-mon[49535]: pgmap v192: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:19:30.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:29 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/2905262425' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T13:19:30.721 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'sleep 180' 2026-03-10T13:19:30.911 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:30 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/3753077617' entity='client.admin' cmd=[{"prefix": "status"}]: dispatch 2026-03-10T13:19:30.911 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:30 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/72367421' entity='client.admin' cmd=[{"prefix": "mgr fail"}]: dispatch 2026-03-10T13:19:30.911 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:30 vm00 ceph-mon[47364]: osdmap e86: 8 total, 8 up, 8 in 2026-03-10T13:19:30.911 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:30 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: [10/Mar/2026:13:19:30] ENGINE Bus STOPPING 2026-03-10T13:19:30.911 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:30 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/3753077617' entity='client.admin' cmd=[{"prefix": "status"}]: dispatch 2026-03-10T13:19:30.911 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:30 vm00 ceph-mon[51670]: from='client.? 
192.168.123.100:0/72367421' entity='client.admin' cmd=[{"prefix": "mgr fail"}]: dispatch 2026-03-10T13:19:30.911 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:30 vm00 ceph-mon[51670]: osdmap e86: 8 total, 8 up, 8 in 2026-03-10T13:19:31.020 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:19:30 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: ignoring --setuser ceph since I am not root 2026-03-10T13:19:31.020 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:19:30 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: ignoring --setgroup ceph since I am not root 2026-03-10T13:19:31.020 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:19:30 vm08 ceph-mgr[68040]: -- 192.168.123.108:0/4181030213 <== mon.2 v2:192.168.123.108:3300/0 4 ==== auth_reply(proto 2 0 (0) Success) ==== 194+0+0 (secure 0 0 0) 0x5596cf3af4a0 con 0x5596cf38d400 2026-03-10T13:19:31.020 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:19:30 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: 2026-03-10T13:19:30.787+0000 7f2321dcc140 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member 2026-03-10T13:19:31.020 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:19:30 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: 2026-03-10T13:19:30.829+0000 7f2321dcc140 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member 2026-03-10T13:19:31.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:30 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/3753077617' entity='client.admin' cmd=[{"prefix": "status"}]: dispatch 2026-03-10T13:19:31.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:30 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/72367421' entity='client.admin' cmd=[{"prefix": "mgr fail"}]: dispatch 2026-03-10T13:19:31.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:30 vm08 ceph-mon[49535]: osdmap e86: 8 total, 8 up, 8 in 2026-03-10T13:19:31.211 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:31 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: [10/Mar/2026:13:19:31] ENGINE HTTP Server cherrypy._cpwsgi_server.CPWSGIServer(('::', 9283)) shut down 2026-03-10T13:19:31.211 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:31 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: [10/Mar/2026:13:19:31] ENGINE Bus STOPPED 2026-03-10T13:19:31.211 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:31 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: [10/Mar/2026:13:19:31] ENGINE Bus STARTING 2026-03-10T13:19:31.461 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:31 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: [10/Mar/2026:13:19:31] ENGINE Serving on http://:::9283 2026-03-10T13:19:31.462 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:31 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: [10/Mar/2026:13:19:31] ENGINE Bus STARTED 2026-03-10T13:19:31.496 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:19:31 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: 2026-03-10T13:19:31.229+0000 7f2321dcc140 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-10T13:19:31.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:31 vm00 ceph-mon[47364]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T13:19:31.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:31 vm00 ceph-mon[47364]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 
2026-03-10T13:19:31.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:31 vm00 ceph-mon[47364]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T13:19:31.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:31 vm00 ceph-mon[47364]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-10T13:19:31.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:31 vm00 ceph-mon[47364]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T13:19:31.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:31 vm00 ceph-mon[47364]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T13:19:31.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:31 vm00 ceph-mon[47364]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T13:19:31.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:31 vm00 ceph-mon[47364]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T13:19:31.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:31 vm00 ceph-mon[47364]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T13:19:31.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:31 vm00 ceph-mon[47364]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T13:19:31.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:31 vm00 ceph-mon[47364]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T13:19:31.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:31 vm00 ceph-mon[47364]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T13:19:31.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:31 vm00 ceph-mon[47364]: from='client.? 
192.168.123.100:0/72367421' entity='client.admin' cmd='[{"prefix": "mgr fail"}]': finished 2026-03-10T13:19:31.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:31 vm00 ceph-mon[47364]: mgrmap e28: y(active, starting, since 0.218353s) 2026-03-10T13:19:31.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:31 vm00 ceph-mon[47364]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-10T13:19:31.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:31 vm00 ceph-mon[47364]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-10T13:19:31.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:31 vm00 ceph-mon[47364]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-10T13:19:31.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:31 vm00 ceph-mon[47364]: Manager daemon y is now available 2026-03-10T13:19:31.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:31 vm00 ceph-mon[47364]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:19:31.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:31 vm00 ceph-mon[47364]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T13:19:31.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:31 vm00 ceph-mon[47364]: from='mgr.15027 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T13:19:31.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:31 vm00 ceph-mon[47364]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T13:19:31.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:31 vm00 ceph-mon[47364]: from='mgr.15027 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T13:19:31.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:31 vm00 ceph-mon[51670]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T13:19:31.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:31 vm00 ceph-mon[51670]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T13:19:31.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:31 vm00 ceph-mon[51670]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T13:19:31.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:31 vm00 ceph-mon[51670]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-10T13:19:31.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:31 vm00 ceph-mon[51670]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T13:19:31.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:31 vm00 ceph-mon[51670]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T13:19:31.753 
INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:31 vm00 ceph-mon[51670]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T13:19:31.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:31 vm00 ceph-mon[51670]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T13:19:31.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:31 vm00 ceph-mon[51670]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T13:19:31.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:31 vm00 ceph-mon[51670]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T13:19:31.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:31 vm00 ceph-mon[51670]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T13:19:31.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:31 vm00 ceph-mon[51670]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T13:19:31.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:31 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/72367421' entity='client.admin' cmd='[{"prefix": "mgr fail"}]': finished 2026-03-10T13:19:31.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:31 vm00 ceph-mon[51670]: mgrmap e28: y(active, starting, since 0.218353s) 2026-03-10T13:19:31.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:31 vm00 ceph-mon[51670]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-10T13:19:31.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:31 vm00 ceph-mon[51670]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-10T13:19:31.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:31 vm00 ceph-mon[51670]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-10T13:19:31.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:31 vm00 ceph-mon[51670]: Manager daemon y is now available 2026-03-10T13:19:31.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:31 vm00 ceph-mon[51670]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:19:31.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:31 vm00 ceph-mon[51670]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T13:19:31.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:31 vm00 ceph-mon[51670]: from='mgr.15027 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T13:19:31.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:31 vm00 ceph-mon[51670]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T13:19:31.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:31 vm00 ceph-mon[51670]: from='mgr.15027 ' entity='mgr.y' cmd=[{"prefix":"config 
rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T13:19:31.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:31 vm08 ceph-mon[49535]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T13:19:31.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:31 vm08 ceph-mon[49535]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T13:19:31.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:31 vm08 ceph-mon[49535]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T13:19:31.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:31 vm08 ceph-mon[49535]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-10T13:19:31.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:31 vm08 ceph-mon[49535]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T13:19:31.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:31 vm08 ceph-mon[49535]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T13:19:31.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:31 vm08 ceph-mon[49535]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T13:19:31.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:31 vm08 ceph-mon[49535]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T13:19:31.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:31 vm08 ceph-mon[49535]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T13:19:31.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:31 vm08 ceph-mon[49535]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T13:19:31.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:31 vm08 ceph-mon[49535]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T13:19:31.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:31 vm08 ceph-mon[49535]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T13:19:31.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:31 vm08 ceph-mon[49535]: from='client.? 
192.168.123.100:0/72367421' entity='client.admin' cmd='[{"prefix": "mgr fail"}]': finished 2026-03-10T13:19:31.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:31 vm08 ceph-mon[49535]: mgrmap e28: y(active, starting, since 0.218353s) 2026-03-10T13:19:31.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:31 vm08 ceph-mon[49535]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-10T13:19:31.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:31 vm08 ceph-mon[49535]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-10T13:19:31.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:31 vm08 ceph-mon[49535]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-10T13:19:31.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:31 vm08 ceph-mon[49535]: Manager daemon y is now available 2026-03-10T13:19:31.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:31 vm08 ceph-mon[49535]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:19:31.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:31 vm08 ceph-mon[49535]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T13:19:31.772 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:31 vm08 ceph-mon[49535]: from='mgr.15027 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T13:19:31.772 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:31 vm08 ceph-mon[49535]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T13:19:31.772 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:31 vm08 ceph-mon[49535]: from='mgr.15027 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T13:19:31.772 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:19:31 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: 2026-03-10T13:19:31.546+0000 7f2321dcc140 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member 2026-03-10T13:19:31.772 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:19:31 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: /lib64/python3.9/site-packages/scipy/__init__.py:73: UserWarning: NumPy was imported from a Python sub-interpreter but NumPy does not properly support sub-interpreters. This will likely work for most users but might cause hard to track down issues or subtle bugs. A common user of the rare sub-interpreter feature is wsgi which also allows single-interpreter mode. 2026-03-10T13:19:31.772 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:19:31 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: Improvements in the case of bugs are welcome, but is not on the NumPy roadmap, and full support may require significant effort to achieve. 
2026-03-10T13:19:31.772 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:19:31 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: from numpy import show_config as show_numpy_config 2026-03-10T13:19:31.772 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:19:31 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: 2026-03-10T13:19:31.643+0000 7f2321dcc140 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member 2026-03-10T13:19:31.772 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:19:31 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: 2026-03-10T13:19:31.696+0000 7f2321dcc140 -1 mgr[py] Module influx has missing NOTIFY_TYPES member 2026-03-10T13:19:32.107 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:19:31 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: 2026-03-10T13:19:31.786+0000 7f2321dcc140 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member 2026-03-10T13:19:32.636 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:19:32 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: 2026-03-10T13:19:32.377+0000 7f2321dcc140 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-10T13:19:32.637 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:19:32 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: 2026-03-10T13:19:32.499+0000 7f2321dcc140 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-10T13:19:32.637 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:19:32 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: 2026-03-10T13:19:32.556+0000 7f2321dcc140 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 2026-03-10T13:19:32.637 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:19:32 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: 2026-03-10T13:19:32.603+0000 7f2321dcc140 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-10T13:19:32.877 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:32 vm00 ceph-mon[47364]: mgrmap e29: y(active, since 1.24053s) 2026-03-10T13:19:32.877 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:32 vm00 ceph-mon[47364]: pgmap v3: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:19:32.878 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:32 vm00 ceph-mon[51670]: mgrmap e29: y(active, since 1.24053s) 2026-03-10T13:19:32.878 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:32 vm00 ceph-mon[51670]: pgmap v3: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:19:32.907 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:32 vm08 ceph-mon[49535]: mgrmap e29: y(active, since 1.24053s) 2026-03-10T13:19:32.907 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:32 vm08 ceph-mon[49535]: pgmap v3: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:19:32.907 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:19:32 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: 2026-03-10T13:19:32.659+0000 7f2321dcc140 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 2026-03-10T13:19:32.907 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:19:32 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: 2026-03-10T13:19:32.699+0000 7f2321dcc140 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-10T13:19:32.907 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:19:32 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: 2026-03-10T13:19:32.902+0000 7f2321dcc140 -1 
mgr[py] Module prometheus has missing NOTIFY_TYPES member 2026-03-10T13:19:33.249 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:19:32 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: 2026-03-10T13:19:32.970+0000 7f2321dcc140 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-10T13:19:33.249 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:19:33 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: 2026-03-10T13:19:33.246+0000 7f2321dcc140 -1 mgr[py] Module rgw has missing NOTIFY_TYPES member 2026-03-10T13:19:33.891 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:19:33 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: 2026-03-10T13:19:33.579+0000 7f2321dcc140 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-10T13:19:33.891 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:19:33 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: 2026-03-10T13:19:33.625+0000 7f2321dcc140 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member 2026-03-10T13:19:33.891 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:19:33 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: 2026-03-10T13:19:33.676+0000 7f2321dcc140 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-10T13:19:33.891 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:19:33 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: 2026-03-10T13:19:33.764+0000 7f2321dcc140 -1 mgr[py] Module status has missing NOTIFY_TYPES member 2026-03-10T13:19:33.891 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:19:33 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: 2026-03-10T13:19:33.804+0000 7f2321dcc140 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 2026-03-10T13:19:34.183 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:33 vm08 ceph-mon[49535]: [10/Mar/2026:13:19:32] ENGINE Bus STARTING 2026-03-10T13:19:34.183 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:33 vm08 ceph-mon[49535]: [10/Mar/2026:13:19:32] ENGINE Client ('192.168.123.100', 33504) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-10T13:19:34.183 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:33 vm08 ceph-mon[49535]: [10/Mar/2026:13:19:32] ENGINE Serving on https://192.168.123.100:7150 2026-03-10T13:19:34.183 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:33 vm08 ceph-mon[49535]: [10/Mar/2026:13:19:32] ENGINE Serving on http://192.168.123.100:8765 2026-03-10T13:19:34.183 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:33 vm08 ceph-mon[49535]: [10/Mar/2026:13:19:32] ENGINE Bus STARTED 2026-03-10T13:19:34.183 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:33 vm08 ceph-mon[49535]: pgmap v4: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:19:34.183 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:33 vm08 ceph-mon[49535]: from='mgr.15027 ' entity='mgr.y' 2026-03-10T13:19:34.183 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:33 vm08 ceph-mon[49535]: from='mgr.15027 ' entity='mgr.y' 2026-03-10T13:19:34.183 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:33 vm08 ceph-mon[49535]: from='mgr.15027 ' entity='mgr.y' 2026-03-10T13:19:34.183 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:33 vm08 ceph-mon[49535]: from='mgr.15027 ' entity='mgr.y' 2026-03-10T13:19:34.183 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:33 vm08 ceph-mon[49535]: from='mgr.15027 ' entity='mgr.y' 2026-03-10T13:19:34.183 
INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:33 vm08 ceph-mon[49535]: from='mgr.15027 ' entity='mgr.y' 2026-03-10T13:19:34.183 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:33 vm08 ceph-mon[49535]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm08", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:19:34.183 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:33 vm08 ceph-mon[49535]: from='mgr.15027 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm08", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:19:34.183 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:33 vm08 ceph-mon[49535]: from='mgr.15027 ' entity='mgr.y' 2026-03-10T13:19:34.183 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:33 vm08 ceph-mon[49535]: from='mgr.15027 ' entity='mgr.y' 2026-03-10T13:19:34.183 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:33 vm08 ceph-mon[49535]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:19:34.183 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:33 vm08 ceph-mon[49535]: from='mgr.15027 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:19:34.183 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:33 vm08 ceph-mon[49535]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:19:34.183 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:33 vm08 ceph-mon[49535]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:19:34.183 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:19:33 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: 2026-03-10T13:19:33.888+0000 7f2321dcc140 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member 2026-03-10T13:19:34.183 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:19:34 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: 2026-03-10T13:19:34.017+0000 7f2321dcc140 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member 2026-03-10T13:19:34.183 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:19:34 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: 2026-03-10T13:19:34.179+0000 7f2321dcc140 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-10T13:19:34.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:33 vm00 ceph-mon[51670]: [10/Mar/2026:13:19:32] ENGINE Bus STARTING 2026-03-10T13:19:34.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:33 vm00 ceph-mon[51670]: [10/Mar/2026:13:19:32] ENGINE Client ('192.168.123.100', 33504) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-10T13:19:34.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:33 vm00 ceph-mon[51670]: [10/Mar/2026:13:19:32] ENGINE Serving on https://192.168.123.100:7150 2026-03-10T13:19:34.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:33 vm00 ceph-mon[51670]: [10/Mar/2026:13:19:32] ENGINE Serving on http://192.168.123.100:8765 2026-03-10T13:19:34.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:33 vm00 ceph-mon[51670]: [10/Mar/2026:13:19:32] ENGINE Bus STARTED 2026-03-10T13:19:34.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:33 vm00 ceph-mon[51670]: pgmap v4: 161 
pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:19:34.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:33 vm00 ceph-mon[51670]: from='mgr.15027 ' entity='mgr.y' 2026-03-10T13:19:34.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:33 vm00 ceph-mon[51670]: from='mgr.15027 ' entity='mgr.y' 2026-03-10T13:19:34.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:33 vm00 ceph-mon[51670]: from='mgr.15027 ' entity='mgr.y' 2026-03-10T13:19:34.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:33 vm00 ceph-mon[51670]: from='mgr.15027 ' entity='mgr.y' 2026-03-10T13:19:34.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:33 vm00 ceph-mon[51670]: from='mgr.15027 ' entity='mgr.y' 2026-03-10T13:19:34.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:33 vm00 ceph-mon[51670]: from='mgr.15027 ' entity='mgr.y' 2026-03-10T13:19:34.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:33 vm00 ceph-mon[51670]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm08", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:19:34.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:33 vm00 ceph-mon[51670]: from='mgr.15027 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm08", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:19:34.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:33 vm00 ceph-mon[51670]: from='mgr.15027 ' entity='mgr.y' 2026-03-10T13:19:34.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:33 vm00 ceph-mon[51670]: from='mgr.15027 ' entity='mgr.y' 2026-03-10T13:19:34.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:33 vm00 ceph-mon[51670]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:19:34.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:33 vm00 ceph-mon[51670]: from='mgr.15027 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:19:34.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:33 vm00 ceph-mon[51670]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:19:34.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:33 vm00 ceph-mon[51670]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:19:34.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:33 vm00 ceph-mon[47364]: [10/Mar/2026:13:19:32] ENGINE Bus STARTING 2026-03-10T13:19:34.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:33 vm00 ceph-mon[47364]: [10/Mar/2026:13:19:32] ENGINE Client ('192.168.123.100', 33504) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-10T13:19:34.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:33 vm00 ceph-mon[47364]: [10/Mar/2026:13:19:32] ENGINE Serving on https://192.168.123.100:7150 2026-03-10T13:19:34.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:33 vm00 ceph-mon[47364]: [10/Mar/2026:13:19:32] ENGINE Serving on http://192.168.123.100:8765 2026-03-10T13:19:34.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:33 vm00 ceph-mon[47364]: [10/Mar/2026:13:19:32] ENGINE Bus STARTED 2026-03-10T13:19:34.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:33 
vm00 ceph-mon[47364]: pgmap v4: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:19:34.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:33 vm00 ceph-mon[47364]: from='mgr.15027 ' entity='mgr.y' 2026-03-10T13:19:34.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:33 vm00 ceph-mon[47364]: from='mgr.15027 ' entity='mgr.y' 2026-03-10T13:19:34.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:33 vm00 ceph-mon[47364]: from='mgr.15027 ' entity='mgr.y' 2026-03-10T13:19:34.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:33 vm00 ceph-mon[47364]: from='mgr.15027 ' entity='mgr.y' 2026-03-10T13:19:34.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:33 vm00 ceph-mon[47364]: from='mgr.15027 ' entity='mgr.y' 2026-03-10T13:19:34.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:33 vm00 ceph-mon[47364]: from='mgr.15027 ' entity='mgr.y' 2026-03-10T13:19:34.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:33 vm00 ceph-mon[47364]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm08", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:19:34.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:33 vm00 ceph-mon[47364]: from='mgr.15027 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm08", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:19:34.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:33 vm00 ceph-mon[47364]: from='mgr.15027 ' entity='mgr.y' 2026-03-10T13:19:34.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:33 vm00 ceph-mon[47364]: from='mgr.15027 ' entity='mgr.y' 2026-03-10T13:19:34.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:33 vm00 ceph-mon[47364]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:19:34.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:33 vm00 ceph-mon[47364]: from='mgr.15027 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:19:34.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:33 vm00 ceph-mon[47364]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:19:34.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:33 vm00 ceph-mon[47364]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:19:34.520 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:19:34 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: 2026-03-10T13:19:34.228+0000 7f2321dcc140 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-10T13:19:34.520 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:19:34 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: [10/Mar/2026:13:19:34] ENGINE Bus STARTING 2026-03-10T13:19:34.520 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:19:34 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: CherryPy Checker: 2026-03-10T13:19:34.520 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:19:34 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: The Application mounted at '' has an empty config. 
2026-03-10T13:19:34.521 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:19:34 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: 2026-03-10T13:19:34.521 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:19:34 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: [10/Mar/2026:13:19:34] ENGINE Serving on http://:::9283 2026-03-10T13:19:34.521 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:19:34 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: [10/Mar/2026:13:19:34] ENGINE Bus STARTED 2026-03-10T13:19:35.112 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:34 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:19:34.844+0000 7fea06c77640 -1 log_channel(cephadm) log [ERR] : cephadm exited with an error code: 1, stderr: Reconfig daemon iscsi.foo.vm00.dezodo ... 2026-03-10T13:19:35.112 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:34 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: Creating ceph-iscsi config... 2026-03-10T13:19:35.112 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:34 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: Write file: /var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/iscsi.foo.vm00.dezodo/iscsi-gateway.cfg 2026-03-10T13:19:35.112 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:34 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: Write file: /var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/iscsi.foo.vm00.dezodo/tcmu-runner-entrypoint.sh 2026-03-10T13:19:35.112 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:34 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: Non-zero exit code 1 from systemctl restart ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo 2026-03-10T13:19:35.112 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:34 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: systemctl: stderr Job for ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service failed because the control process exited with error code. 2026-03-10T13:19:35.113 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:34 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: systemctl: stderr See "systemctl status ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service" and "journalctl -xeu ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service" for details. 
2026-03-10T13:19:35.113 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:34 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: Traceback (most recent call last): 2026-03-10T13:19:35.113 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:34 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main 2026-03-10T13:19:35.113 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:34 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: return _run_code(code, main_globals, None, 2026-03-10T13:19:35.113 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:34 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code 2026-03-10T13:19:35.113 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:34 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: exec(code, run_globals) 2026-03-10T13:19:35.113 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:34 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in 2026-03-10T13:19:35.113 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:34 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main 2026-03-10T13:19:35.113 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:34 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3062, in command_deploy_from 2026-03-10T13:19:35.113 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:34 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3097, in _common_deploy 2026-03-10T13:19:35.113 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:34 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3117, in _deploy_daemon_container 2026-03-10T13:19:35.113 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:34 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 1126, in deploy_daemon 2026-03-10T13:19:35.113 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:34 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 307, in call_throws 2026-03-10T13:19:35.113 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:34 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: RuntimeError: Failed command: systemctl restart ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo: Job for ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service failed because the control process exited with error code. 2026-03-10T13:19:35.113 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:34 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: See "systemctl status ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service" and "journalctl -xeu ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service" for details. 
2026-03-10T13:19:35.113 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:34 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: Traceback (most recent call last): 2026-03-10T13:19:35.113 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:34 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1145, in _check_daemons 2026-03-10T13:19:35.113 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:34 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: self.mgr._daemon_action(daemon_spec, action=action) 2026-03-10T13:19:35.113 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:34 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: File "/usr/share/ceph/mgr/cephadm/module.py", line 2545, in _daemon_action 2026-03-10T13:19:35.113 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:34 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: return self.wait_async( 2026-03-10T13:19:35.113 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:34 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: File "/usr/share/ceph/mgr/cephadm/module.py", line 815, in wait_async 2026-03-10T13:19:35.113 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:34 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: return self.event_loop.get_result(coro, timeout) 2026-03-10T13:19:35.113 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:34 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: File "/usr/share/ceph/mgr/cephadm/ssh.py", line 136, in get_result 2026-03-10T13:19:35.113 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:34 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: return future.result(timeout) 2026-03-10T13:19:35.113 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:34 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: File "/lib64/python3.9/concurrent/futures/_base.py", line 446, in result 2026-03-10T13:19:35.113 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:34 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: return self.__get_result() 2026-03-10T13:19:35.113 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:34 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: File "/lib64/python3.9/concurrent/futures/_base.py", line 391, in __get_result 2026-03-10T13:19:35.113 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:34 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: raise self._exception 2026-03-10T13:19:35.113 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:34 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1381, in _create_daemon 2026-03-10T13:19:35.113 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:34 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: out, err, code = await self._run_cephadm( 2026-03-10T13:19:35.113 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:34 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1724, in _run_cephadm 2026-03-10T13:19:35.113 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:34 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: raise OrchestratorError( 2026-03-10T13:19:35.113 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:34 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: orchestrator._interface.OrchestratorError: cephadm exited with an error code: 1, stderr: Reconfig daemon iscsi.foo.vm00.dezodo ... 
2026-03-10T13:19:35.113 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:34 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: Creating ceph-iscsi config... 2026-03-10T13:19:35.113 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:34 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: Write file: /var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/iscsi.foo.vm00.dezodo/iscsi-gateway.cfg 2026-03-10T13:19:35.113 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:34 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: Write file: /var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/iscsi.foo.vm00.dezodo/tcmu-runner-entrypoint.sh 2026-03-10T13:19:35.113 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:34 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: Non-zero exit code 1 from systemctl restart ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo 2026-03-10T13:19:35.113 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:34 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: systemctl: stderr Job for ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service failed because the control process exited with error code. 2026-03-10T13:19:35.113 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:34 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: systemctl: stderr See "systemctl status ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service" and "journalctl -xeu ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service" for details. 2026-03-10T13:19:35.113 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:34 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: Traceback (most recent call last): 2026-03-10T13:19:35.113 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:34 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main 2026-03-10T13:19:35.113 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:34 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: return _run_code(code, main_globals, None, 2026-03-10T13:19:35.113 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:34 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code 2026-03-10T13:19:35.113 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:34 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: exec(code, run_globals) 2026-03-10T13:19:35.113 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:34 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in 2026-03-10T13:19:35.113 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:34 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main 2026-03-10T13:19:35.113 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:34 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3062, in command_deploy_from 2026-03-10T13:19:35.113 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:34 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3097, in _common_deploy 2026-03-10T13:19:35.113 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:34 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: File 
"/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3117, in _deploy_daemon_container 2026-03-10T13:19:35.113 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:34 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 1126, in deploy_daemon 2026-03-10T13:19:35.113 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:34 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 307, in call_throws 2026-03-10T13:19:35.113 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:34 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: RuntimeError: Failed command: systemctl restart ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo: Job for ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service failed because the control process exited with error code. 2026-03-10T13:19:35.113 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:34 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: See "systemctl status ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service" and "journalctl -xeu ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service" for details. 2026-03-10T13:19:35.376 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:35 vm08 ceph-mon[49535]: Updating vm00:/etc/ceph/ceph.conf 2026-03-10T13:19:35.376 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:35 vm08 ceph-mon[49535]: Updating vm08:/etc/ceph/ceph.conf 2026-03-10T13:19:35.376 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:35 vm08 ceph-mon[49535]: Updating vm00:/var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/config/ceph.conf 2026-03-10T13:19:35.376 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:35 vm08 ceph-mon[49535]: Updating vm08:/var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/config/ceph.conf 2026-03-10T13:19:35.376 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:35 vm08 ceph-mon[49535]: Updating vm00:/etc/ceph/ceph.client.admin.keyring 2026-03-10T13:19:35.377 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:35 vm08 ceph-mon[49535]: Updating vm08:/etc/ceph/ceph.client.admin.keyring 2026-03-10T13:19:35.377 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:35 vm08 ceph-mon[49535]: Updating vm00:/var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/config/ceph.client.admin.keyring 2026-03-10T13:19:35.377 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:35 vm08 ceph-mon[49535]: Updating vm08:/var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/config/ceph.client.admin.keyring 2026-03-10T13:19:35.377 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:35 vm08 ceph-mon[49535]: from='mgr.15027 ' entity='mgr.y' 2026-03-10T13:19:35.377 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:35 vm08 ceph-mon[49535]: from='mgr.15027 ' entity='mgr.y' 2026-03-10T13:19:35.377 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:35 vm08 ceph-mon[49535]: from='mgr.15027 ' entity='mgr.y' 2026-03-10T13:19:35.377 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:35 vm08 ceph-mon[49535]: from='mgr.15027 ' entity='mgr.y' 2026-03-10T13:19:35.377 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:35 vm08 ceph-mon[49535]: from='mgr.15027 ' entity='mgr.y' 2026-03-10T13:19:35.377 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:35 vm08 ceph-mon[49535]: Standby manager daemon x started 2026-03-10T13:19:35.377 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:35 vm08 ceph-mon[49535]: from='mgr.? 
192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-10T13:19:35.377 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:35 vm08 ceph-mon[49535]: from='mgr.? 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-10T13:19:35.377 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:35 vm08 ceph-mon[49535]: from='mgr.? 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-10T13:19:35.377 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:35 vm08 ceph-mon[49535]: from='mgr.? 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-10T13:19:35.377 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:35 vm08 ceph-mon[49535]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dezodo", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-10T13:19:35.377 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:35 vm08 ceph-mon[49535]: from='mgr.15027 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dezodo", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-10T13:19:35.377 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:35 vm08 ceph-mon[49535]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:19:35.377 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:35 vm08 ceph-mon[49535]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-10T13:19:35.377 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:35 vm08 ceph-mon[49535]: mgrmap e30: y(active, since 3s), standbys: x 2026-03-10T13:19:35.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:35 vm00 ceph-mon[47364]: Updating vm00:/etc/ceph/ceph.conf 2026-03-10T13:19:35.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:35 vm00 ceph-mon[47364]: Updating vm08:/etc/ceph/ceph.conf 2026-03-10T13:19:35.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:35 vm00 ceph-mon[47364]: Updating vm00:/var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/config/ceph.conf 2026-03-10T13:19:35.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:35 vm00 ceph-mon[47364]: Updating vm08:/var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/config/ceph.conf 2026-03-10T13:19:35.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:35 vm00 ceph-mon[47364]: Updating vm00:/etc/ceph/ceph.client.admin.keyring 2026-03-10T13:19:35.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:35 vm00 ceph-mon[47364]: Updating vm08:/etc/ceph/ceph.client.admin.keyring 2026-03-10T13:19:35.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:35 vm00 ceph-mon[47364]: Updating vm00:/var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/config/ceph.client.admin.keyring 2026-03-10T13:19:35.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:35 vm00 ceph-mon[47364]: Updating 
vm08:/var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/config/ceph.client.admin.keyring 2026-03-10T13:19:35.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:35 vm00 ceph-mon[47364]: from='mgr.15027 ' entity='mgr.y' 2026-03-10T13:19:35.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:35 vm00 ceph-mon[47364]: from='mgr.15027 ' entity='mgr.y' 2026-03-10T13:19:35.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:35 vm00 ceph-mon[47364]: from='mgr.15027 ' entity='mgr.y' 2026-03-10T13:19:35.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:35 vm00 ceph-mon[47364]: from='mgr.15027 ' entity='mgr.y' 2026-03-10T13:19:35.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:35 vm00 ceph-mon[47364]: from='mgr.15027 ' entity='mgr.y' 2026-03-10T13:19:35.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:35 vm00 ceph-mon[47364]: Standby manager daemon x started 2026-03-10T13:19:35.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:35 vm00 ceph-mon[47364]: from='mgr.? 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-10T13:19:35.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:35 vm00 ceph-mon[47364]: from='mgr.? 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-10T13:19:35.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:35 vm00 ceph-mon[47364]: from='mgr.? 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-10T13:19:35.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:35 vm00 ceph-mon[47364]: from='mgr.? 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-10T13:19:35.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:35 vm00 ceph-mon[47364]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dezodo", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-10T13:19:35.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:35 vm00 ceph-mon[47364]: from='mgr.15027 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dezodo", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-10T13:19:35.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:35 vm00 ceph-mon[47364]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:19:35.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:35 vm00 ceph-mon[47364]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-10T13:19:35.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:35 vm00 ceph-mon[47364]: mgrmap e30: y(active, since 3s), standbys: x 2026-03-10T13:19:35.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:35 vm00 ceph-mon[51670]: Updating vm00:/etc/ceph/ceph.conf 2026-03-10T13:19:35.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:35 vm00 ceph-mon[51670]: Updating vm08:/etc/ceph/ceph.conf 
2026-03-10T13:19:35.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:35 vm00 ceph-mon[51670]: Updating vm00:/var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/config/ceph.conf 2026-03-10T13:19:35.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:35 vm00 ceph-mon[51670]: Updating vm08:/var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/config/ceph.conf 2026-03-10T13:19:35.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:35 vm00 ceph-mon[51670]: Updating vm00:/etc/ceph/ceph.client.admin.keyring 2026-03-10T13:19:35.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:35 vm00 ceph-mon[51670]: Updating vm08:/etc/ceph/ceph.client.admin.keyring 2026-03-10T13:19:35.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:35 vm00 ceph-mon[51670]: Updating vm00:/var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/config/ceph.client.admin.keyring 2026-03-10T13:19:35.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:35 vm00 ceph-mon[51670]: Updating vm08:/var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/config/ceph.client.admin.keyring 2026-03-10T13:19:35.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:35 vm00 ceph-mon[51670]: from='mgr.15027 ' entity='mgr.y' 2026-03-10T13:19:35.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:35 vm00 ceph-mon[51670]: from='mgr.15027 ' entity='mgr.y' 2026-03-10T13:19:35.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:35 vm00 ceph-mon[51670]: from='mgr.15027 ' entity='mgr.y' 2026-03-10T13:19:35.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:35 vm00 ceph-mon[51670]: from='mgr.15027 ' entity='mgr.y' 2026-03-10T13:19:35.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:35 vm00 ceph-mon[51670]: from='mgr.15027 ' entity='mgr.y' 2026-03-10T13:19:35.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:35 vm00 ceph-mon[51670]: Standby manager daemon x started 2026-03-10T13:19:35.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:35 vm00 ceph-mon[51670]: from='mgr.? 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-10T13:19:35.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:35 vm00 ceph-mon[51670]: from='mgr.? 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-10T13:19:35.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:35 vm00 ceph-mon[51670]: from='mgr.? 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-10T13:19:35.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:35 vm00 ceph-mon[51670]: from='mgr.? 
192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-10T13:19:35.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:35 vm00 ceph-mon[51670]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dezodo", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-10T13:19:35.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:35 vm00 ceph-mon[51670]: from='mgr.15027 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dezodo", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-10T13:19:35.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:35 vm00 ceph-mon[51670]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:19:35.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:35 vm00 ceph-mon[51670]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-10T13:19:35.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:35 vm00 ceph-mon[51670]: mgrmap e30: y(active, since 3s), standbys: x 2026-03-10T13:19:35.624 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:19:35 vm08 systemd[1]: Stopping Ceph prometheus.a for 98a3dada-1c81-11f1-89c9-d57c120f78d5... 2026-03-10T13:19:35.625 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:19:35 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:19:35.542Z caller=main.go:964 level=warn msg="Received SIGTERM, exiting gracefully..." 2026-03-10T13:19:35.625 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:19:35 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:19:35.542Z caller=main.go:988 level=info msg="Stopping scrape discovery manager..." 2026-03-10T13:19:35.625 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:19:35 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:19:35.542Z caller=main.go:1002 level=info msg="Stopping notify discovery manager..." 2026-03-10T13:19:35.625 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:19:35 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:19:35.542Z caller=manager.go:177 level=info component="rule manager" msg="Stopping rule manager..." 
2026-03-10T13:19:35.625 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:19:35 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:19:35.542Z caller=main.go:984 level=info msg="Scrape discovery manager stopped" 2026-03-10T13:19:35.625 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:19:35 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:19:35.542Z caller=main.go:998 level=info msg="Notify discovery manager stopped" 2026-03-10T13:19:35.625 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:19:35 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:19:35.542Z caller=manager.go:187 level=info component="rule manager" msg="Rule manager stopped" 2026-03-10T13:19:35.625 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:19:35 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:19:35.542Z caller=main.go:1039 level=info msg="Stopping scrape manager..." 2026-03-10T13:19:35.625 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:19:35 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:19:35.542Z caller=main.go:1031 level=info msg="Scrape manager stopped" 2026-03-10T13:19:35.625 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:19:35 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:19:35.544Z caller=notifier.go:618 level=info component=notifier msg="Stopping notification manager..." 2026-03-10T13:19:35.625 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:19:35 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:19:35.544Z caller=main.go:1261 level=info msg="Notifier manager stopped" 2026-03-10T13:19:35.625 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:19:35 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[71388]: ts=2026-03-10T13:19:35.544Z caller=main.go:1273 level=info msg="See you next time!" 2026-03-10T13:19:35.625 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:19:35 vm08 podman[74933]: 2026-03-10 13:19:35.554415696 +0000 UTC m=+0.025992352 container died 5ef54cde2aad32806eb7fe252926d9ce30a197177bd920c75e48f208ffb042f2 (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-10T13:19:35.625 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:19:35 vm08 podman[74933]: 2026-03-10 13:19:35.572596913 +0000 UTC m=+0.044173569 container remove 5ef54cde2aad32806eb7fe252926d9ce30a197177bd920c75e48f208ffb042f2 (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-10T13:19:35.625 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:19:35 vm08 bash[74933]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a 2026-03-10T13:19:36.021 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:19:35 vm08 systemd[1]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@prometheus.a.service: Deactivated successfully. 2026-03-10T13:19:36.021 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:19:35 vm08 systemd[1]: Stopped Ceph prometheus.a for 98a3dada-1c81-11f1-89c9-d57c120f78d5. 2026-03-10T13:19:36.021 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:19:35 vm08 systemd[1]: Starting Ceph prometheus.a for 98a3dada-1c81-11f1-89c9-d57c120f78d5... 
2026-03-10T13:19:36.021 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:19:35 vm08 podman[75002]: 2026-03-10 13:19:35.716707702 +0000 UTC m=+0.017997696 container create 3b07f384b4b6a851f9f8fc9ff353eacda99d87677443222714b01813a683dcb2 (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-10T13:19:36.021 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:19:35 vm08 podman[75002]: 2026-03-10 13:19:35.74007031 +0000 UTC m=+0.041360304 container init 3b07f384b4b6a851f9f8fc9ff353eacda99d87677443222714b01813a683dcb2 (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-10T13:19:36.021 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:19:35 vm08 podman[75002]: 2026-03-10 13:19:35.742214364 +0000 UTC m=+0.043504358 container start 3b07f384b4b6a851f9f8fc9ff353eacda99d87677443222714b01813a683dcb2 (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-10T13:19:36.021 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:19:35 vm08 bash[75002]: 3b07f384b4b6a851f9f8fc9ff353eacda99d87677443222714b01813a683dcb2 2026-03-10T13:19:36.021 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:19:35 vm08 podman[75002]: 2026-03-10 13:19:35.708830074 +0000 UTC m=+0.010120068 image pull 1d3b7f56885b6dd623f1785be963aa9c195f86bc256ea454e8d02a7980b79c53 quay.io/prometheus/prometheus:v2.51.0 2026-03-10T13:19:36.021 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:19:35 vm08 systemd[1]: Started Ceph prometheus.a for 98a3dada-1c81-11f1-89c9-d57c120f78d5. 2026-03-10T13:19:36.021 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:19:35 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[75012]: ts=2026-03-10T13:19:35.763Z caller=main.go:617 level=info msg="Starting Prometheus Server" mode=server version="(version=2.51.0, branch=HEAD, revision=c05c15512acb675e3f6cd662a6727854e93fc024)" 2026-03-10T13:19:36.021 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:19:35 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[75012]: ts=2026-03-10T13:19:35.763Z caller=main.go:622 level=info build_context="(go=go1.22.1, platform=linux/amd64, user=root@b5723e458358, date=20240319-10:54:45, tags=netgo,builtinassets,stringlabels)" 2026-03-10T13:19:36.021 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:19:35 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[75012]: ts=2026-03-10T13:19:35.763Z caller=main.go:623 level=info host_details="(Linux 5.14.0-686.el9.x86_64 #1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026 x86_64 vm08 (none))" 2026-03-10T13:19:36.021 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:19:35 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[75012]: ts=2026-03-10T13:19:35.763Z caller=main.go:624 level=info fd_limits="(soft=1048576, hard=1048576)" 2026-03-10T13:19:36.021 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:19:35 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[75012]: ts=2026-03-10T13:19:35.763Z caller=main.go:625 level=info vm_limits="(soft=unlimited, hard=unlimited)" 2026-03-10T13:19:36.021 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:19:35 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[75012]: ts=2026-03-10T13:19:35.764Z caller=web.go:568 level=info component=web msg="Start listening 
for connections" address=:9095 2026-03-10T13:19:36.022 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:19:35 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[75012]: ts=2026-03-10T13:19:35.764Z caller=main.go:1129 level=info msg="Starting TSDB ..." 2026-03-10T13:19:36.022 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:19:35 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[75012]: ts=2026-03-10T13:19:35.768Z caller=tls_config.go:313 level=info component=web msg="Listening on" address=[::]:9095 2026-03-10T13:19:36.022 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:19:35 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[75012]: ts=2026-03-10T13:19:35.768Z caller=tls_config.go:316 level=info component=web msg="TLS is disabled." http2=false address=[::]:9095 2026-03-10T13:19:36.022 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:19:35 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[75012]: ts=2026-03-10T13:19:35.769Z caller=head.go:616 level=info component=tsdb msg="Replaying on-disk memory mappable chunks if any" 2026-03-10T13:19:36.022 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:19:35 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[75012]: ts=2026-03-10T13:19:35.769Z caller=head.go:698 level=info component=tsdb msg="On-disk memory mappable chunks replay completed" duration=1.242µs 2026-03-10T13:19:36.022 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:19:35 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[75012]: ts=2026-03-10T13:19:35.769Z caller=head.go:706 level=info component=tsdb msg="Replaying WAL, this may take a while" 2026-03-10T13:19:36.022 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:19:35 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[75012]: ts=2026-03-10T13:19:35.774Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=0 maxSegment=3 2026-03-10T13:19:36.022 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:19:35 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[75012]: ts=2026-03-10T13:19:35.784Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=1 maxSegment=3 2026-03-10T13:19:36.022 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:19:35 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[75012]: ts=2026-03-10T13:19:35.790Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=2 maxSegment=3 2026-03-10T13:19:36.022 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:19:35 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[75012]: ts=2026-03-10T13:19:35.790Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=3 maxSegment=3 2026-03-10T13:19:36.022 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:19:35 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[75012]: ts=2026-03-10T13:19:35.790Z caller=head.go:815 level=info component=tsdb msg="WAL replay completed" checkpoint_replay_duration=101.109µs wal_replay_duration=21.349089ms wbl_replay_duration=120ns total_replay_duration=21.46167ms 2026-03-10T13:19:36.022 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:19:35 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[75012]: ts=2026-03-10T13:19:35.793Z caller=main.go:1150 level=info fs_type=XFS_SUPER_MAGIC 2026-03-10T13:19:36.022 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:19:35 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[75012]: 
ts=2026-03-10T13:19:35.793Z caller=main.go:1153 level=info msg="TSDB started" 2026-03-10T13:19:36.022 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:19:35 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[75012]: ts=2026-03-10T13:19:35.793Z caller=main.go:1335 level=info msg="Loading configuration file" filename=/etc/prometheus/prometheus.yml 2026-03-10T13:19:36.022 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:19:35 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[75012]: ts=2026-03-10T13:19:35.802Z caller=main.go:1372 level=info msg="Completed loading of configuration file" filename=/etc/prometheus/prometheus.yml totalDuration=9.515695ms db_storage=1.072µs remote_storage=1.022µs web_handler=200ns query_engine=630ns scrape=1.124936ms scrape_sd=83.255µs notify=7.444µs notify_sd=6.482µs rules=7.984256ms tracing=3.005µs 2026-03-10T13:19:36.022 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:19:35 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[75012]: ts=2026-03-10T13:19:35.802Z caller=main.go:1114 level=info msg="Server is ready to receive web requests." 2026-03-10T13:19:36.022 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:19:35 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[75012]: ts=2026-03-10T13:19:35.802Z caller=manager.go:163 level=info component="rule manager" msg="Starting rule manager..." 2026-03-10T13:19:36.114 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:35 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: [10/Mar/2026:13:19:35] ENGINE Bus STOPPING 2026-03-10T13:19:36.114 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:35 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: [10/Mar/2026:13:19:35] ENGINE HTTP Server cherrypy._cpwsgi_server.CPWSGIServer(('::', 9283)) shut down 2026-03-10T13:19:36.114 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:35 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: [10/Mar/2026:13:19:35] ENGINE Bus STOPPED 2026-03-10T13:19:36.114 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:35 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: [10/Mar/2026:13:19:35] ENGINE Bus STARTING 2026-03-10T13:19:36.114 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:35 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: [10/Mar/2026:13:19:35] ENGINE Serving on http://:::9283 2026-03-10T13:19:36.114 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:35 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: [10/Mar/2026:13:19:35] ENGINE Bus STARTED 2026-03-10T13:19:36.400 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:36 vm08 ceph-mon[49535]: Reconfiguring iscsi.foo.vm00.dezodo (dependencies changed)... 2026-03-10T13:19:36.401 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:36 vm08 ceph-mon[49535]: Reconfiguring daemon iscsi.foo.vm00.dezodo on vm00 2026-03-10T13:19:36.401 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:36 vm08 ceph-mon[49535]: pgmap v5: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:19:36.401 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:36 vm08 ceph-mon[49535]: cephadm exited with an error code: 1, stderr: Reconfig daemon iscsi.foo.vm00.dezodo ... 2026-03-10T13:19:36.401 INFO:journalctl@ceph.mon.b.vm08.stdout: Creating ceph-iscsi config... 
2026-03-10T13:19:36.401 INFO:journalctl@ceph.mon.b.vm08.stdout: Write file: /var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/iscsi.foo.vm00.dezodo/iscsi-gateway.cfg 2026-03-10T13:19:36.401 INFO:journalctl@ceph.mon.b.vm08.stdout: Write file: /var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/iscsi.foo.vm00.dezodo/tcmu-runner-entrypoint.sh 2026-03-10T13:19:36.401 INFO:journalctl@ceph.mon.b.vm08.stdout: Non-zero exit code 1 from systemctl restart ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo 2026-03-10T13:19:36.401 INFO:journalctl@ceph.mon.b.vm08.stdout: systemctl: stderr Job for ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service failed because the control process exited with error code. 2026-03-10T13:19:36.401 INFO:journalctl@ceph.mon.b.vm08.stdout: systemctl: stderr See "systemctl status ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service" and "journalctl -xeu ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service" for details. 2026-03-10T13:19:36.401 INFO:journalctl@ceph.mon.b.vm08.stdout: Traceback (most recent call last): 2026-03-10T13:19:36.401 INFO:journalctl@ceph.mon.b.vm08.stdout: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main 2026-03-10T13:19:36.401 INFO:journalctl@ceph.mon.b.vm08.stdout: return _run_code(code, main_globals, None, 2026-03-10T13:19:36.401 INFO:journalctl@ceph.mon.b.vm08.stdout: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code 2026-03-10T13:19:36.401 INFO:journalctl@ceph.mon.b.vm08.stdout: exec(code, run_globals) 2026-03-10T13:19:36.401 INFO:journalctl@ceph.mon.b.vm08.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in 2026-03-10T13:19:36.401 INFO:journalctl@ceph.mon.b.vm08.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main 2026-03-10T13:19:36.401 INFO:journalctl@ceph.mon.b.vm08.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3062, in command_deploy_from 2026-03-10T13:19:36.401 INFO:journalctl@ceph.mon.b.vm08.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3097, in _common_deploy 2026-03-10T13:19:36.401 INFO:journalctl@ceph.mon.b.vm08.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3117, in _deploy_daemon_container 2026-03-10T13:19:36.401 INFO:journalctl@ceph.mon.b.vm08.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 1126, in deploy_daemon 2026-03-10T13:19:36.401 INFO:journalctl@ceph.mon.b.vm08.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 307, in call_throws 2026-03-10T13:19:36.401 INFO:journalctl@ceph.mon.b.vm08.stdout: RuntimeError: Failed command: systemctl restart ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo: Job for ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service failed because the control process exited with error code. 2026-03-10T13:19:36.401 INFO:journalctl@ceph.mon.b.vm08.stdout: See "systemctl status ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service" and "journalctl -xeu ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service" for details. 
2026-03-10T13:19:36.401 INFO:journalctl@ceph.mon.b.vm08.stdout: Traceback (most recent call last): 2026-03-10T13:19:36.401 INFO:journalctl@ceph.mon.b.vm08.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1145, in _check_daemons 2026-03-10T13:19:36.401 INFO:journalctl@ceph.mon.b.vm08.stdout: self.mgr._daemon_action(daemon_spec, action=action) 2026-03-10T13:19:36.401 INFO:journalctl@ceph.mon.b.vm08.stdout: File "/usr/share/ceph/mgr/cephadm/module.py", line 2545, in _daemon_action 2026-03-10T13:19:36.401 INFO:journalctl@ceph.mon.b.vm08.stdout: return self.wait_async( 2026-03-10T13:19:36.401 INFO:journalctl@ceph.mon.b.vm08.stdout: File "/usr/share/ceph/mgr/cephadm/module.py", line 815, in wait_async 2026-03-10T13:19:36.401 INFO:journalctl@ceph.mon.b.vm08.stdout: return self.event_loop.get_result(coro, timeout) 2026-03-10T13:19:36.401 INFO:journalctl@ceph.mon.b.vm08.stdout: File "/usr/share/ceph/mgr/cephadm/ssh.py", line 136, in get_result 2026-03-10T13:19:36.401 INFO:journalctl@ceph.mon.b.vm08.stdout: return future.result(timeout) 2026-03-10T13:19:36.401 INFO:journalctl@ceph.mon.b.vm08.stdout: File "/lib64/python3.9/concurrent/futures/_base.py", line 446, in result 2026-03-10T13:19:36.401 INFO:journalctl@ceph.mon.b.vm08.stdout: return self.__get_result() 2026-03-10T13:19:36.401 INFO:journalctl@ceph.mon.b.vm08.stdout: File "/lib64/python3.9/concurrent/futures/_base.py", line 391, in __get_result 2026-03-10T13:19:36.401 INFO:journalctl@ceph.mon.b.vm08.stdout: raise self._exception 2026-03-10T13:19:36.401 INFO:journalctl@ceph.mon.b.vm08.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1381, in _create_daemon 2026-03-10T13:19:36.401 INFO:journalctl@ceph.mon.b.vm08.stdout: out, err, code = await self._run_cephadm( 2026-03-10T13:19:36.401 INFO:journalctl@ceph.mon.b.vm08.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1724, in _run_cephadm 2026-03-10T13:19:36.401 INFO:journalctl@ceph.mon.b.vm08.stdout: raise OrchestratorError( 2026-03-10T13:19:36.401 INFO:journalctl@ceph.mon.b.vm08.stdout: orchestrator._interface.OrchestratorError: cephadm exited with an error code: 1, stderr: Reconfig daemon iscsi.foo.vm00.dezodo ... 2026-03-10T13:19:36.401 INFO:journalctl@ceph.mon.b.vm08.stdout: Creating ceph-iscsi config... 2026-03-10T13:19:36.401 INFO:journalctl@ceph.mon.b.vm08.stdout: Write file: /var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/iscsi.foo.vm00.dezodo/iscsi-gateway.cfg 2026-03-10T13:19:36.402 INFO:journalctl@ceph.mon.b.vm08.stdout: Write file: /var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/iscsi.foo.vm00.dezodo/tcmu-runner-entrypoint.sh 2026-03-10T13:19:36.402 INFO:journalctl@ceph.mon.b.vm08.stdout: Non-zero exit code 1 from systemctl restart ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo 2026-03-10T13:19:36.402 INFO:journalctl@ceph.mon.b.vm08.stdout: systemctl: stderr Job for ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service failed because the control process exited with error code. 2026-03-10T13:19:36.402 INFO:journalctl@ceph.mon.b.vm08.stdout: systemctl: stderr See "systemctl status ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service" and "journalctl -xeu ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service" for details. 
2026-03-10T13:19:36.402 INFO:journalctl@ceph.mon.b.vm08.stdout: Traceback (most recent call last): 2026-03-10T13:19:36.402 INFO:journalctl@ceph.mon.b.vm08.stdout: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main 2026-03-10T13:19:36.402 INFO:journalctl@ceph.mon.b.vm08.stdout: return _run_code(code, main_globals, None, 2026-03-10T13:19:36.402 INFO:journalctl@ceph.mon.b.vm08.stdout: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code 2026-03-10T13:19:36.402 INFO:journalctl@ceph.mon.b.vm08.stdout: exec(code, run_globals) 2026-03-10T13:19:36.402 INFO:journalctl@ceph.mon.b.vm08.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in 2026-03-10T13:19:36.402 INFO:journalctl@ceph.mon.b.vm08.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main 2026-03-10T13:19:36.402 INFO:journalctl@ceph.mon.b.vm08.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3062, in command_deploy_from 2026-03-10T13:19:36.402 INFO:journalctl@ceph.mon.b.vm08.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3097, in _common_deploy 2026-03-10T13:19:36.402 INFO:journalctl@ceph.mon.b.vm08.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3117, in _deploy_daemon_container 2026-03-10T13:19:36.402 INFO:journalctl@ceph.mon.b.vm08.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 1126, in deploy_daemon 2026-03-10T13:19:36.402 INFO:journalctl@ceph.mon.b.vm08.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 307, in call_throws 2026-03-10T13:19:36.402 INFO:journalctl@ceph.mon.b.vm08.stdout: RuntimeError: Failed command: systemctl restart ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo: Job for ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service failed because the control process exited with error code. 2026-03-10T13:19:36.402 INFO:journalctl@ceph.mon.b.vm08.stdout: See "systemctl status ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service" and "journalctl -xeu ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service" for details. 2026-03-10T13:19:36.402 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:36 vm08 ceph-mon[49535]: Reconfiguring prometheus.a (dependencies changed)... 2026-03-10T13:19:36.402 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:36 vm08 ceph-mon[49535]: Reconfiguring daemon prometheus.a on vm08 2026-03-10T13:19:36.402 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:36 vm08 ceph-mon[49535]: from='mgr.15027 ' entity='mgr.y' 2026-03-10T13:19:36.402 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:36 vm08 ceph-mon[49535]: from='mgr.15027 ' entity='mgr.y' 2026-03-10T13:19:36.402 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:36 vm08 ceph-mon[49535]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-10T13:19:36.402 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:36 vm08 ceph-mon[49535]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm08.local:9095"}]: dispatch 2026-03-10T13:19:36.402 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:36 vm08 ceph-mon[49535]: from='mgr.15027 ' entity='mgr.y' 2026-03-10T13:19:36.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:36 vm00 ceph-mon[47364]: Reconfiguring iscsi.foo.vm00.dezodo (dependencies changed)... 
2026-03-10T13:19:36.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:36 vm00 ceph-mon[47364]: Reconfiguring daemon iscsi.foo.vm00.dezodo on vm00 2026-03-10T13:19:36.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:36 vm00 ceph-mon[47364]: pgmap v5: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:19:36.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:36 vm00 ceph-mon[47364]: cephadm exited with an error code: 1, stderr: Reconfig daemon iscsi.foo.vm00.dezodo ... 2026-03-10T13:19:36.503 INFO:journalctl@ceph.mon.a.vm00.stdout: Creating ceph-iscsi config... 2026-03-10T13:19:36.503 INFO:journalctl@ceph.mon.a.vm00.stdout: Write file: /var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/iscsi.foo.vm00.dezodo/iscsi-gateway.cfg 2026-03-10T13:19:36.503 INFO:journalctl@ceph.mon.a.vm00.stdout: Write file: /var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/iscsi.foo.vm00.dezodo/tcmu-runner-entrypoint.sh 2026-03-10T13:19:36.503 INFO:journalctl@ceph.mon.a.vm00.stdout: Non-zero exit code 1 from systemctl restart ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo 2026-03-10T13:19:36.503 INFO:journalctl@ceph.mon.a.vm00.stdout: systemctl: stderr Job for ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service failed because the control process exited with error code. 2026-03-10T13:19:36.503 INFO:journalctl@ceph.mon.a.vm00.stdout: systemctl: stderr See "systemctl status ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service" and "journalctl -xeu ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service" for details. 2026-03-10T13:19:36.503 INFO:journalctl@ceph.mon.a.vm00.stdout: Traceback (most recent call last): 2026-03-10T13:19:36.503 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main 2026-03-10T13:19:36.503 INFO:journalctl@ceph.mon.a.vm00.stdout: return _run_code(code, main_globals, None, 2026-03-10T13:19:36.503 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code 2026-03-10T13:19:36.503 INFO:journalctl@ceph.mon.a.vm00.stdout: exec(code, run_globals) 2026-03-10T13:19:36.503 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in 2026-03-10T13:19:36.503 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main 2026-03-10T13:19:36.503 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3062, in command_deploy_from 2026-03-10T13:19:36.503 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3097, in _common_deploy 2026-03-10T13:19:36.503 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3117, in _deploy_daemon_container 2026-03-10T13:19:36.503 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 1126, in deploy_daemon 2026-03-10T13:19:36.503 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 307, in call_throws 2026-03-10T13:19:36.503 INFO:journalctl@ceph.mon.a.vm00.stdout: RuntimeError: Failed command: systemctl restart ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo: Job for ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service failed because the control process exited with error code. 
2026-03-10T13:19:36.503 INFO:journalctl@ceph.mon.a.vm00.stdout: See "systemctl status ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service" and "journalctl -xeu ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service" for details. 2026-03-10T13:19:36.503 INFO:journalctl@ceph.mon.a.vm00.stdout: Traceback (most recent call last): 2026-03-10T13:19:36.503 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1145, in _check_daemons 2026-03-10T13:19:36.503 INFO:journalctl@ceph.mon.a.vm00.stdout: self.mgr._daemon_action(daemon_spec, action=action) 2026-03-10T13:19:36.503 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/usr/share/ceph/mgr/cephadm/module.py", line 2545, in _daemon_action 2026-03-10T13:19:36.503 INFO:journalctl@ceph.mon.a.vm00.stdout: return self.wait_async( 2026-03-10T13:19:36.503 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/usr/share/ceph/mgr/cephadm/module.py", line 815, in wait_async 2026-03-10T13:19:36.503 INFO:journalctl@ceph.mon.a.vm00.stdout: return self.event_loop.get_result(coro, timeout) 2026-03-10T13:19:36.503 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/usr/share/ceph/mgr/cephadm/ssh.py", line 136, in get_result 2026-03-10T13:19:36.503 INFO:journalctl@ceph.mon.a.vm00.stdout: return future.result(timeout) 2026-03-10T13:19:36.503 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/lib64/python3.9/concurrent/futures/_base.py", line 446, in result 2026-03-10T13:19:36.503 INFO:journalctl@ceph.mon.a.vm00.stdout: return self.__get_result() 2026-03-10T13:19:36.503 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/lib64/python3.9/concurrent/futures/_base.py", line 391, in __get_result 2026-03-10T13:19:36.503 INFO:journalctl@ceph.mon.a.vm00.stdout: raise self._exception 2026-03-10T13:19:36.503 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1381, in _create_daemon 2026-03-10T13:19:36.503 INFO:journalctl@ceph.mon.a.vm00.stdout: out, err, code = await self._run_cephadm( 2026-03-10T13:19:36.504 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1724, in _run_cephadm 2026-03-10T13:19:36.504 INFO:journalctl@ceph.mon.a.vm00.stdout: raise OrchestratorError( 2026-03-10T13:19:36.504 INFO:journalctl@ceph.mon.a.vm00.stdout: orchestrator._interface.OrchestratorError: cephadm exited with an error code: 1, stderr: Reconfig daemon iscsi.foo.vm00.dezodo ... 2026-03-10T13:19:36.504 INFO:journalctl@ceph.mon.a.vm00.stdout: Creating ceph-iscsi config... 2026-03-10T13:19:36.504 INFO:journalctl@ceph.mon.a.vm00.stdout: Write file: /var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/iscsi.foo.vm00.dezodo/iscsi-gateway.cfg 2026-03-10T13:19:36.504 INFO:journalctl@ceph.mon.a.vm00.stdout: Write file: /var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/iscsi.foo.vm00.dezodo/tcmu-runner-entrypoint.sh 2026-03-10T13:19:36.504 INFO:journalctl@ceph.mon.a.vm00.stdout: Non-zero exit code 1 from systemctl restart ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo 2026-03-10T13:19:36.504 INFO:journalctl@ceph.mon.a.vm00.stdout: systemctl: stderr Job for ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service failed because the control process exited with error code. 2026-03-10T13:19:36.504 INFO:journalctl@ceph.mon.a.vm00.stdout: systemctl: stderr See "systemctl status ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service" and "journalctl -xeu ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service" for details. 
2026-03-10T13:19:36.504 INFO:journalctl@ceph.mon.a.vm00.stdout: Traceback (most recent call last): 2026-03-10T13:19:36.504 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main 2026-03-10T13:19:36.504 INFO:journalctl@ceph.mon.a.vm00.stdout: return _run_code(code, main_globals, None, 2026-03-10T13:19:36.504 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code 2026-03-10T13:19:36.504 INFO:journalctl@ceph.mon.a.vm00.stdout: exec(code, run_globals) 2026-03-10T13:19:36.504 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in 2026-03-10T13:19:36.504 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main 2026-03-10T13:19:36.504 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3062, in command_deploy_from 2026-03-10T13:19:36.504 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3097, in _common_deploy 2026-03-10T13:19:36.504 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3117, in _deploy_daemon_container 2026-03-10T13:19:36.504 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 1126, in deploy_daemon 2026-03-10T13:19:36.504 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 307, in call_throws 2026-03-10T13:19:36.504 INFO:journalctl@ceph.mon.a.vm00.stdout: RuntimeError: Failed command: systemctl restart ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo: Job for ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service failed because the control process exited with error code. 2026-03-10T13:19:36.504 INFO:journalctl@ceph.mon.a.vm00.stdout: See "systemctl status ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service" and "journalctl -xeu ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service" for details. 2026-03-10T13:19:36.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:36 vm00 ceph-mon[47364]: Reconfiguring prometheus.a (dependencies changed)... 2026-03-10T13:19:36.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:36 vm00 ceph-mon[47364]: Reconfiguring daemon prometheus.a on vm08 2026-03-10T13:19:36.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:36 vm00 ceph-mon[47364]: from='mgr.15027 ' entity='mgr.y' 2026-03-10T13:19:36.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:36 vm00 ceph-mon[47364]: from='mgr.15027 ' entity='mgr.y' 2026-03-10T13:19:36.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:36 vm00 ceph-mon[47364]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-10T13:19:36.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:36 vm00 ceph-mon[47364]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm08.local:9095"}]: dispatch 2026-03-10T13:19:36.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:36 vm00 ceph-mon[47364]: from='mgr.15027 ' entity='mgr.y' 2026-03-10T13:19:36.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:36 vm00 ceph-mon[51670]: Reconfiguring iscsi.foo.vm00.dezodo (dependencies changed)... 
2026-03-10T13:19:36.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:36 vm00 ceph-mon[51670]: Reconfiguring daemon iscsi.foo.vm00.dezodo on vm00 2026-03-10T13:19:36.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:36 vm00 ceph-mon[51670]: pgmap v5: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:19:36.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:36 vm00 ceph-mon[51670]: cephadm exited with an error code: 1, stderr: Reconfig daemon iscsi.foo.vm00.dezodo ... 2026-03-10T13:19:36.504 INFO:journalctl@ceph.mon.c.vm00.stdout: Creating ceph-iscsi config... 2026-03-10T13:19:36.504 INFO:journalctl@ceph.mon.c.vm00.stdout: Write file: /var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/iscsi.foo.vm00.dezodo/iscsi-gateway.cfg 2026-03-10T13:19:36.504 INFO:journalctl@ceph.mon.c.vm00.stdout: Write file: /var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/iscsi.foo.vm00.dezodo/tcmu-runner-entrypoint.sh 2026-03-10T13:19:36.504 INFO:journalctl@ceph.mon.c.vm00.stdout: Non-zero exit code 1 from systemctl restart ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo 2026-03-10T13:19:36.504 INFO:journalctl@ceph.mon.c.vm00.stdout: systemctl: stderr Job for ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service failed because the control process exited with error code. 2026-03-10T13:19:36.504 INFO:journalctl@ceph.mon.c.vm00.stdout: systemctl: stderr See "systemctl status ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service" and "journalctl -xeu ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service" for details. 2026-03-10T13:19:36.504 INFO:journalctl@ceph.mon.c.vm00.stdout: Traceback (most recent call last): 2026-03-10T13:19:36.504 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main 2026-03-10T13:19:36.504 INFO:journalctl@ceph.mon.c.vm00.stdout: return _run_code(code, main_globals, None, 2026-03-10T13:19:36.504 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code 2026-03-10T13:19:36.504 INFO:journalctl@ceph.mon.c.vm00.stdout: exec(code, run_globals) 2026-03-10T13:19:36.504 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in 2026-03-10T13:19:36.504 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main 2026-03-10T13:19:36.505 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3062, in command_deploy_from 2026-03-10T13:19:36.505 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3097, in _common_deploy 2026-03-10T13:19:36.505 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3117, in _deploy_daemon_container 2026-03-10T13:19:36.505 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 1126, in deploy_daemon 2026-03-10T13:19:36.505 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 307, in call_throws 2026-03-10T13:19:36.505 INFO:journalctl@ceph.mon.c.vm00.stdout: RuntimeError: Failed command: systemctl restart ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo: Job for ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service failed because the control process exited with error code. 
2026-03-10T13:19:36.505 INFO:journalctl@ceph.mon.c.vm00.stdout: See "systemctl status ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service" and "journalctl -xeu ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service" for details. 2026-03-10T13:19:36.505 INFO:journalctl@ceph.mon.c.vm00.stdout: Traceback (most recent call last): 2026-03-10T13:19:36.505 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1145, in _check_daemons 2026-03-10T13:19:36.505 INFO:journalctl@ceph.mon.c.vm00.stdout: self.mgr._daemon_action(daemon_spec, action=action) 2026-03-10T13:19:36.505 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/usr/share/ceph/mgr/cephadm/module.py", line 2545, in _daemon_action 2026-03-10T13:19:36.505 INFO:journalctl@ceph.mon.c.vm00.stdout: return self.wait_async( 2026-03-10T13:19:36.505 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/usr/share/ceph/mgr/cephadm/module.py", line 815, in wait_async 2026-03-10T13:19:36.505 INFO:journalctl@ceph.mon.c.vm00.stdout: return self.event_loop.get_result(coro, timeout) 2026-03-10T13:19:36.505 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/usr/share/ceph/mgr/cephadm/ssh.py", line 136, in get_result 2026-03-10T13:19:36.505 INFO:journalctl@ceph.mon.c.vm00.stdout: return future.result(timeout) 2026-03-10T13:19:36.505 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/lib64/python3.9/concurrent/futures/_base.py", line 446, in result 2026-03-10T13:19:36.505 INFO:journalctl@ceph.mon.c.vm00.stdout: return self.__get_result() 2026-03-10T13:19:36.505 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/lib64/python3.9/concurrent/futures/_base.py", line 391, in __get_result 2026-03-10T13:19:36.505 INFO:journalctl@ceph.mon.c.vm00.stdout: raise self._exception 2026-03-10T13:19:36.505 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1381, in _create_daemon 2026-03-10T13:19:36.505 INFO:journalctl@ceph.mon.c.vm00.stdout: out, err, code = await self._run_cephadm( 2026-03-10T13:19:36.505 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1724, in _run_cephadm 2026-03-10T13:19:36.505 INFO:journalctl@ceph.mon.c.vm00.stdout: raise OrchestratorError( 2026-03-10T13:19:36.505 INFO:journalctl@ceph.mon.c.vm00.stdout: orchestrator._interface.OrchestratorError: cephadm exited with an error code: 1, stderr: Reconfig daemon iscsi.foo.vm00.dezodo ... 2026-03-10T13:19:36.505 INFO:journalctl@ceph.mon.c.vm00.stdout: Creating ceph-iscsi config... 2026-03-10T13:19:36.505 INFO:journalctl@ceph.mon.c.vm00.stdout: Write file: /var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/iscsi.foo.vm00.dezodo/iscsi-gateway.cfg 2026-03-10T13:19:36.505 INFO:journalctl@ceph.mon.c.vm00.stdout: Write file: /var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/iscsi.foo.vm00.dezodo/tcmu-runner-entrypoint.sh 2026-03-10T13:19:36.505 INFO:journalctl@ceph.mon.c.vm00.stdout: Non-zero exit code 1 from systemctl restart ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo 2026-03-10T13:19:36.505 INFO:journalctl@ceph.mon.c.vm00.stdout: systemctl: stderr Job for ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service failed because the control process exited with error code. 2026-03-10T13:19:36.505 INFO:journalctl@ceph.mon.c.vm00.stdout: systemctl: stderr See "systemctl status ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service" and "journalctl -xeu ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service" for details. 
2026-03-10T13:19:36.505 INFO:journalctl@ceph.mon.c.vm00.stdout: Traceback (most recent call last): 2026-03-10T13:19:36.505 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main 2026-03-10T13:19:36.505 INFO:journalctl@ceph.mon.c.vm00.stdout: return _run_code(code, main_globals, None, 2026-03-10T13:19:36.505 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code 2026-03-10T13:19:36.505 INFO:journalctl@ceph.mon.c.vm00.stdout: exec(code, run_globals) 2026-03-10T13:19:36.505 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in 2026-03-10T13:19:36.505 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main 2026-03-10T13:19:36.505 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3062, in command_deploy_from 2026-03-10T13:19:36.505 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3097, in _common_deploy 2026-03-10T13:19:36.505 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3117, in _deploy_daemon_container 2026-03-10T13:19:36.505 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 1126, in deploy_daemon 2026-03-10T13:19:36.505 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 307, in call_throws 2026-03-10T13:19:36.505 INFO:journalctl@ceph.mon.c.vm00.stdout: RuntimeError: Failed command: systemctl restart ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo: Job for ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service failed because the control process exited with error code. 2026-03-10T13:19:36.505 INFO:journalctl@ceph.mon.c.vm00.stdout: See "systemctl status ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service" and "journalctl -xeu ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service" for details. 2026-03-10T13:19:36.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:36 vm00 ceph-mon[51670]: Reconfiguring prometheus.a (dependencies changed)... 
2026-03-10T13:19:36.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:36 vm00 ceph-mon[51670]: Reconfiguring daemon prometheus.a on vm08 2026-03-10T13:19:36.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:36 vm00 ceph-mon[51670]: from='mgr.15027 ' entity='mgr.y' 2026-03-10T13:19:36.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:36 vm00 ceph-mon[51670]: from='mgr.15027 ' entity='mgr.y' 2026-03-10T13:19:36.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:36 vm00 ceph-mon[51670]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-10T13:19:36.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:36 vm00 ceph-mon[51670]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm08.local:9095"}]: dispatch 2026-03-10T13:19:36.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:36 vm00 ceph-mon[51670]: from='mgr.15027 ' entity='mgr.y' 2026-03-10T13:19:37.253 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:19:36 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:19:36.952Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:19:37.253 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:19:36 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:19:36.953Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:19:37.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:37 vm00 ceph-mon[47364]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-10T13:19:37.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:37 vm00 ceph-mon[47364]: from='mon.? -' entity='mon.' 
cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm08.local:9095"}]: dispatch 2026-03-10T13:19:37.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:37 vm00 ceph-mon[47364]: from='mgr.15027 ' entity='mgr.y' 2026-03-10T13:19:37.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:37 vm00 ceph-mon[47364]: from='mgr.15027 ' entity='mgr.y' 2026-03-10T13:19:37.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:37 vm00 ceph-mon[47364]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:19:37.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:37 vm00 ceph-mon[47364]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:19:37.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:37 vm00 ceph-mon[47364]: from='mgr.15027 ' entity='mgr.y' 2026-03-10T13:19:37.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:37 vm00 ceph-mon[47364]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dezodo", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-10T13:19:37.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:37 vm00 ceph-mon[47364]: from='mgr.15027 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dezodo", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-10T13:19:37.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:37 vm00 ceph-mon[47364]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:19:37.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:37 vm00 ceph-mon[51670]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-10T13:19:37.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:37 vm00 ceph-mon[51670]: from='mon.? -' entity='mon.' 
cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm08.local:9095"}]: dispatch 2026-03-10T13:19:37.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:37 vm00 ceph-mon[51670]: from='mgr.15027 ' entity='mgr.y' 2026-03-10T13:19:37.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:37 vm00 ceph-mon[51670]: from='mgr.15027 ' entity='mgr.y' 2026-03-10T13:19:37.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:37 vm00 ceph-mon[51670]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:19:37.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:37 vm00 ceph-mon[51670]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:19:37.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:37 vm00 ceph-mon[51670]: from='mgr.15027 ' entity='mgr.y' 2026-03-10T13:19:37.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:37 vm00 ceph-mon[51670]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dezodo", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-10T13:19:37.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:37 vm00 ceph-mon[51670]: from='mgr.15027 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dezodo", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-10T13:19:37.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:37 vm00 ceph-mon[51670]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:19:37.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:37 vm08 ceph-mon[49535]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-10T13:19:37.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:37 vm08 ceph-mon[49535]: from='mon.? -' entity='mon.' 
cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm08.local:9095"}]: dispatch 2026-03-10T13:19:37.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:37 vm08 ceph-mon[49535]: from='mgr.15027 ' entity='mgr.y' 2026-03-10T13:19:37.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:37 vm08 ceph-mon[49535]: from='mgr.15027 ' entity='mgr.y' 2026-03-10T13:19:37.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:37 vm08 ceph-mon[49535]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:19:37.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:37 vm08 ceph-mon[49535]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:19:37.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:37 vm08 ceph-mon[49535]: from='mgr.15027 ' entity='mgr.y' 2026-03-10T13:19:37.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:37 vm08 ceph-mon[49535]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dezodo", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-10T13:19:37.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:37 vm08 ceph-mon[49535]: from='mgr.15027 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dezodo", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-10T13:19:37.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:37 vm08 ceph-mon[49535]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:19:38.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:38 vm00 ceph-mon[47364]: pgmap v6: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:19:38.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:38 vm00 ceph-mon[47364]: Reconfiguring iscsi.foo.vm00.dezodo (dependencies changed)... 
2026-03-10T13:19:38.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:38 vm00 ceph-mon[47364]: Reconfiguring daemon iscsi.foo.vm00.dezodo on vm00 2026-03-10T13:19:38.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:38 vm00 ceph-mon[47364]: from='mgr.15027 ' entity='mgr.y' 2026-03-10T13:19:38.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:38 vm00 ceph-mon[47364]: from='mgr.15027 ' entity='mgr.y' 2026-03-10T13:19:38.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:38 vm00 ceph-mon[47364]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-10T13:19:38.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:38 vm00 ceph-mon[47364]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-10T13:19:38.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:38 vm00 ceph-mon[51670]: pgmap v6: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:19:38.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:38 vm00 ceph-mon[51670]: Reconfiguring iscsi.foo.vm00.dezodo (dependencies changed)... 2026-03-10T13:19:38.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:38 vm00 ceph-mon[51670]: Reconfiguring daemon iscsi.foo.vm00.dezodo on vm00 2026-03-10T13:19:38.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:38 vm00 ceph-mon[51670]: from='mgr.15027 ' entity='mgr.y' 2026-03-10T13:19:38.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:38 vm00 ceph-mon[51670]: from='mgr.15027 ' entity='mgr.y' 2026-03-10T13:19:38.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:38 vm00 ceph-mon[51670]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-10T13:19:38.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:38 vm00 ceph-mon[51670]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-10T13:19:38.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:38 vm08 ceph-mon[49535]: pgmap v6: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:19:38.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:38 vm08 ceph-mon[49535]: Reconfiguring iscsi.foo.vm00.dezodo (dependencies changed)... 2026-03-10T13:19:38.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:38 vm08 ceph-mon[49535]: Reconfiguring daemon iscsi.foo.vm00.dezodo on vm00 2026-03-10T13:19:38.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:38 vm08 ceph-mon[49535]: from='mgr.15027 ' entity='mgr.y' 2026-03-10T13:19:38.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:38 vm08 ceph-mon[49535]: from='mgr.15027 ' entity='mgr.y' 2026-03-10T13:19:38.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:38 vm08 ceph-mon[49535]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-10T13:19:38.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:38 vm08 ceph-mon[49535]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-10T13:19:39.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:39 vm00 ceph-mon[51670]: from='mon.? -' entity='mon.' 
cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-10T13:19:39.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:39 vm00 ceph-mon[51670]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-10T13:19:39.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:39 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/1177178313' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-10T13:19:39.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:39 vm00 ceph-mon[47364]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-10T13:19:39.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:39 vm00 ceph-mon[47364]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-10T13:19:39.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:39 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/1177178313' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-10T13:19:39.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:39 vm08 ceph-mon[49535]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-10T13:19:39.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:39 vm08 ceph-mon[49535]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-10T13:19:39.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:39 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/1177178313' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-10T13:19:40.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:40 vm00 ceph-mon[47364]: pgmap v7: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 26 KiB/s rd, 0 B/s wr, 11 op/s 2026-03-10T13:19:40.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:40 vm00 ceph-mon[47364]: from='mgr.15027 ' entity='mgr.y' 2026-03-10T13:19:40.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:40 vm00 ceph-mon[47364]: from='mgr.15027 ' entity='mgr.y' 2026-03-10T13:19:40.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:40 vm00 ceph-mon[47364]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:19:40.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:40 vm00 ceph-mon[47364]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:19:40.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:40 vm00 ceph-mon[47364]: from='mgr.15027 ' entity='mgr.y' 2026-03-10T13:19:40.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:40 vm00 ceph-mon[51670]: pgmap v7: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 26 KiB/s rd, 0 B/s wr, 11 op/s 2026-03-10T13:19:40.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:40 vm00 ceph-mon[51670]: from='mgr.15027 ' entity='mgr.y' 2026-03-10T13:19:40.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:40 vm00 ceph-mon[51670]: from='mgr.15027 ' entity='mgr.y' 2026-03-10T13:19:40.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:40 vm00 ceph-mon[51670]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: 
dispatch 2026-03-10T13:19:40.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:40 vm00 ceph-mon[51670]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:19:40.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:40 vm00 ceph-mon[51670]: from='mgr.15027 ' entity='mgr.y' 2026-03-10T13:19:40.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:40 vm08 ceph-mon[49535]: pgmap v7: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 26 KiB/s rd, 0 B/s wr, 11 op/s 2026-03-10T13:19:40.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:40 vm08 ceph-mon[49535]: from='mgr.15027 ' entity='mgr.y' 2026-03-10T13:19:40.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:40 vm08 ceph-mon[49535]: from='mgr.15027 ' entity='mgr.y' 2026-03-10T13:19:40.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:40 vm08 ceph-mon[49535]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:19:40.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:40 vm08 ceph-mon[49535]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:19:40.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:40 vm08 ceph-mon[49535]: from='mgr.15027 ' entity='mgr.y' 2026-03-10T13:19:41.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:41 vm00 ceph-mon[47364]: pgmap v8: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 20 KiB/s rd, 0 B/s wr, 8 op/s 2026-03-10T13:19:41.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:41 vm00 ceph-mon[51670]: pgmap v8: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 20 KiB/s rd, 0 B/s wr, 8 op/s 2026-03-10T13:19:41.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:41 vm08 ceph-mon[49535]: pgmap v8: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 20 KiB/s rd, 0 B/s wr, 8 op/s 2026-03-10T13:19:44.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:43 vm00 ceph-mon[47364]: pgmap v9: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 17 KiB/s rd, 0 B/s wr, 7 op/s 2026-03-10T13:19:44.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:43 vm00 ceph-mon[51670]: pgmap v9: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 17 KiB/s rd, 0 B/s wr, 7 op/s 2026-03-10T13:19:44.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:43 vm08 ceph-mon[49535]: pgmap v9: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 17 KiB/s rd, 0 B/s wr, 7 op/s 2026-03-10T13:19:45.946 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:45 vm00 ceph-mon[51670]: pgmap v10: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 0 B/s wr, 6 op/s 2026-03-10T13:19:45.947 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:45 vm00 ceph-mon[47364]: pgmap v10: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 0 B/s wr, 6 op/s 2026-03-10T13:19:45.947 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:45 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:19:45] "GET /metrics HTTP/1.1" 200 34538 "" "Prometheus/2.51.0" 2026-03-10T13:19:46.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:45 vm08 ceph-mon[49535]: pgmap 
v10: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 0 B/s wr, 6 op/s 2026-03-10T13:19:46.955 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:46 vm00 ceph-mon[47364]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:19:46.955 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:19:46 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:19:46.953Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:19:46.955 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:46 vm00 ceph-mon[51670]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:19:47.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:46 vm08 ceph-mon[49535]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:19:47.252 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:19:46 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:19:46.955Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:19:48.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:47 vm00 ceph-mon[47364]: pgmap v11: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 0 B/s wr, 6 op/s 2026-03-10T13:19:48.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:47 vm00 ceph-mon[51670]: pgmap v11: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 0 B/s wr, 6 op/s 2026-03-10T13:19:48.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:47 vm08 ceph-mon[49535]: pgmap v11: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 0 B/s wr, 6 op/s 2026-03-10T13:19:50.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:49 vm00 ceph-mon[47364]: from='client.24848 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:19:50.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:49 vm00 ceph-mon[47364]: pgmap v12: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 16 KiB/s rd, 0 B/s wr, 7 op/s 2026-03-10T13:19:50.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:49 vm00 ceph-mon[51670]: from='client.24848 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:19:50.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:49 vm00 ceph-mon[51670]: pgmap v12: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 16 KiB/s rd, 0 B/s wr, 7 op/s 2026-03-10T13:19:50.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:49 vm08 ceph-mon[49535]: from='client.24848 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": 
"json"}]: dispatch 2026-03-10T13:19:50.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:49 vm08 ceph-mon[49535]: pgmap v12: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 16 KiB/s rd, 0 B/s wr, 7 op/s 2026-03-10T13:19:52.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:51 vm00 ceph-mon[47364]: pgmap v13: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 767 B/s rd, 0 op/s 2026-03-10T13:19:52.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:51 vm00 ceph-mon[51670]: pgmap v13: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 767 B/s rd, 0 op/s 2026-03-10T13:19:52.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:51 vm08 ceph-mon[49535]: pgmap v13: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 767 B/s rd, 0 op/s 2026-03-10T13:19:54.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:53 vm00 ceph-mon[47364]: pgmap v14: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:19:54.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:53 vm00 ceph-mon[51670]: pgmap v14: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:19:54.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:53 vm08 ceph-mon[49535]: pgmap v14: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:19:56.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:55 vm00 ceph-mon[47364]: pgmap v15: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:19:56.003 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:19:55 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:19:55] "GET /metrics HTTP/1.1" 200 37544 "" "Prometheus/2.51.0" 2026-03-10T13:19:56.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:55 vm00 ceph-mon[51670]: pgmap v15: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:19:56.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:55 vm08 ceph-mon[49535]: pgmap v15: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:19:57.253 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:19:56 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:19:56.955Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:19:57.253 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:19:56 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:19:56.956Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:19:58.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:57 vm00 ceph-mon[47364]: pgmap v16: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 
160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:19:58.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:57 vm00 ceph-mon[51670]: pgmap v16: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:19:58.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:57 vm08 ceph-mon[49535]: pgmap v16: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:20:00.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:59 vm00 ceph-mon[47364]: from='client.24848 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:20:00.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:19:59 vm00 ceph-mon[47364]: pgmap v17: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:20:00.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:59 vm00 ceph-mon[51670]: from='client.24848 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:20:00.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:19:59 vm00 ceph-mon[51670]: pgmap v17: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:20:00.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:59 vm08 ceph-mon[49535]: from='client.24848 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:20:00.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:19:59 vm08 ceph-mon[49535]: pgmap v17: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:20:01.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:20:00 vm00 ceph-mon[47364]: overall HEALTH_OK 2026-03-10T13:20:01.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:20:00 vm00 ceph-mon[51670]: overall HEALTH_OK 2026-03-10T13:20:01.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:20:00 vm08 ceph-mon[49535]: overall HEALTH_OK 2026-03-10T13:20:02.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:20:01 vm00 ceph-mon[47364]: pgmap v18: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:20:02.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:20:01 vm00 ceph-mon[47364]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:20:02.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:20:01 vm00 ceph-mon[51670]: pgmap v18: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:20:02.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:20:01 vm00 ceph-mon[51670]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:20:02.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:20:01 vm08 ceph-mon[49535]: pgmap v18: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:20:02.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:20:01 vm08 ceph-mon[49535]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:20:04.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:20:03 vm00 ceph-mon[47364]: pgmap v19: 161 
pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:20:04.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:20:03 vm00 ceph-mon[51670]: pgmap v19: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:20:04.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:20:03 vm08 ceph-mon[49535]: pgmap v19: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:20:06.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:20:05 vm00 ceph-mon[47364]: pgmap v20: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:20:06.003 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:20:05 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:20:05] "GET /metrics HTTP/1.1" 200 37542 "" "Prometheus/2.51.0" 2026-03-10T13:20:06.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:20:05 vm00 ceph-mon[51670]: pgmap v20: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:20:06.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:20:05 vm08 ceph-mon[49535]: pgmap v20: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:20:07.252 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:20:06 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:20:06.956Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:20:07.253 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:20:06 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:20:06.957Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:20:08.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:20:07 vm08 ceph-mon[49535]: pgmap v21: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:20:08.168 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:20:07 vm00 ceph-mon[47364]: pgmap v21: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:20:08.168 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:20:07 vm00 ceph-mon[51670]: pgmap v21: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:20:10.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:20:09 vm00 ceph-mon[47364]: from='client.24848 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:20:10.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:20:09 vm00 ceph-mon[47364]: pgmap v22: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:20:10.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:20:09 vm00 
ceph-mon[51670]: from='client.24848 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:20:10.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:20:09 vm00 ceph-mon[51670]: pgmap v22: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:20:10.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:20:09 vm08 ceph-mon[49535]: from='client.24848 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:20:10.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:20:09 vm08 ceph-mon[49535]: pgmap v22: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:20:12.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:20:11 vm00 ceph-mon[47364]: pgmap v23: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:20:12.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:20:11 vm00 ceph-mon[51670]: pgmap v23: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:20:12.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:20:11 vm08 ceph-mon[49535]: pgmap v23: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:20:14.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:20:13 vm00 ceph-mon[47364]: pgmap v24: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:20:14.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:20:13 vm00 ceph-mon[51670]: pgmap v24: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:20:14.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:20:13 vm08 ceph-mon[49535]: pgmap v24: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:20:16.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:20:15 vm00 ceph-mon[47364]: pgmap v25: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:20:16.003 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:20:15 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:20:15] "GET /metrics HTTP/1.1" 200 37542 "" "Prometheus/2.51.0" 2026-03-10T13:20:16.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:20:15 vm00 ceph-mon[51670]: pgmap v25: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:20:16.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:20:15 vm08 ceph-mon[49535]: pgmap v25: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:20:17.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:20:16 vm08 ceph-mon[49535]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:20:17.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:20:16 vm00 ceph-mon[47364]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:20:17.252 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:20:16 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: 
ts=2026-03-10T13:20:16.957Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:20:17.252 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:20:16 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:20:16.958Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:20:17.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:20:16 vm00 ceph-mon[51670]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:20:18.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:20:17 vm08 ceph-mon[49535]: pgmap v26: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:20:18.173 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:20:17 vm00 ceph-mon[47364]: pgmap v26: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:20:18.174 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:20:17 vm00 ceph-mon[51670]: pgmap v26: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:20:20.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:20:19 vm00 ceph-mon[47364]: from='client.24848 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:20:20.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:20:19 vm00 ceph-mon[47364]: pgmap v27: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:20:20.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:20:19 vm00 ceph-mon[51670]: from='client.24848 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:20:20.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:20:19 vm00 ceph-mon[51670]: pgmap v27: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:20:20.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:20:19 vm08 ceph-mon[49535]: from='client.24848 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:20:20.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:20:19 vm08 ceph-mon[49535]: pgmap v27: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:20:22.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:20:21 vm00 ceph-mon[47364]: pgmap v28: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:20:22.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:20:21 vm00 ceph-mon[51670]: pgmap v28: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:20:22.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:20:21 vm08 ceph-mon[49535]: pgmap v28: 161 pgs: 161 
active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:20:24.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:20:23 vm00 ceph-mon[47364]: pgmap v29: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:20:24.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:20:23 vm00 ceph-mon[51670]: pgmap v29: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:20:24.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:20:23 vm08 ceph-mon[49535]: pgmap v29: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:20:25.942 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:20:25 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:20:25] "GET /metrics HTTP/1.1" 200 37541 "" "Prometheus/2.51.0" 2026-03-10T13:20:26.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:20:25 vm00 ceph-mon[47364]: pgmap v30: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:20:26.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:20:25 vm00 ceph-mon[51670]: pgmap v30: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:20:26.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:20:25 vm08 ceph-mon[49535]: pgmap v30: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:20:27.252 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:20:26 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:20:26.958Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:20:27.252 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:20:26 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:20:26.958Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:20:28.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:20:27 vm00 ceph-mon[47364]: pgmap v31: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:20:28.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:20:27 vm00 ceph-mon[51670]: pgmap v31: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:20:28.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:20:27 vm08 ceph-mon[49535]: pgmap v31: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:20:30.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:20:29 vm00 ceph-mon[47364]: from='client.24848 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:20:30.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:20:29 vm00 
ceph-mon[47364]: pgmap v32: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:20:30.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:20:29 vm00 ceph-mon[51670]: from='client.24848 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:20:30.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:20:29 vm00 ceph-mon[51670]: pgmap v32: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:20:30.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:20:29 vm08 ceph-mon[49535]: from='client.24848 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:20:30.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:20:29 vm08 ceph-mon[49535]: pgmap v32: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:20:32.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:20:31 vm00 ceph-mon[47364]: pgmap v33: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:20:32.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:20:31 vm00 ceph-mon[47364]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:20:32.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:20:31 vm00 ceph-mon[51670]: pgmap v33: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:20:32.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:20:31 vm00 ceph-mon[51670]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:20:32.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:20:31 vm08 ceph-mon[49535]: pgmap v33: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:20:32.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:20:31 vm08 ceph-mon[49535]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:20:34.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:20:33 vm00 ceph-mon[47364]: pgmap v34: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:20:34.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:20:33 vm00 ceph-mon[51670]: pgmap v34: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:20:34.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:20:33 vm08 ceph-mon[49535]: pgmap v34: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:20:36.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:20:35 vm00 ceph-mon[47364]: pgmap v35: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:20:36.002 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:20:35 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:20:35] "GET /metrics HTTP/1.1" 200 37538 "" "Prometheus/2.51.0" 2026-03-10T13:20:36.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:20:35 vm00 ceph-mon[51670]: pgmap v35: 161 pgs: 161 active+clean; 457 KiB data, 
95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:20:36.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:20:35 vm08 ceph-mon[49535]: pgmap v35: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:20:37.252 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:20:36 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:20:36.958Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:20:37.253 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:20:36 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:20:36.960Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:20:38.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:20:37 vm08 ceph-mon[49535]: pgmap v36: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:20:38.186 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:20:37 vm00 ceph-mon[47364]: pgmap v36: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:20:38.222 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:20:37 vm00 ceph-mon[51670]: pgmap v36: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:20:40.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:20:39 vm00 ceph-mon[47364]: from='client.24848 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:20:40.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:20:39 vm00 ceph-mon[47364]: pgmap v37: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:20:40.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:20:39 vm00 ceph-mon[51670]: from='client.24848 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:20:40.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:20:39 vm00 ceph-mon[51670]: pgmap v37: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:20:40.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:20:39 vm08 ceph-mon[49535]: from='client.24848 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:20:40.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:20:39 vm08 ceph-mon[49535]: pgmap v37: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:20:41.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:20:40 vm00 ceph-mon[47364]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:20:41.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:20:40 vm00 ceph-mon[47364]: from='mgr.15027 
192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:20:41.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:20:40 vm00 ceph-mon[47364]: from='mgr.15027 ' entity='mgr.y' 2026-03-10T13:20:41.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:20:40 vm00 ceph-mon[51670]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:20:41.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:20:40 vm00 ceph-mon[51670]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:20:41.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:20:40 vm00 ceph-mon[51670]: from='mgr.15027 ' entity='mgr.y' 2026-03-10T13:20:41.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:20:40 vm08 ceph-mon[49535]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:20:41.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:20:40 vm08 ceph-mon[49535]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:20:41.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:20:40 vm08 ceph-mon[49535]: from='mgr.15027 ' entity='mgr.y' 2026-03-10T13:20:42.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:20:41 vm00 ceph-mon[47364]: pgmap v38: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:20:42.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:20:41 vm00 ceph-mon[51670]: pgmap v38: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:20:42.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:20:41 vm08 ceph-mon[49535]: pgmap v38: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:20:44.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:20:43 vm00 ceph-mon[47364]: pgmap v39: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:20:44.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:20:43 vm00 ceph-mon[51670]: pgmap v39: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:20:44.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:20:43 vm08 ceph-mon[49535]: pgmap v39: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:20:46.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:20:45 vm00 ceph-mon[47364]: pgmap v40: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:20:46.002 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:20:45 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:20:45] "GET /metrics HTTP/1.1" 200 37538 "" "Prometheus/2.51.0" 2026-03-10T13:20:46.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:20:45 vm00 ceph-mon[51670]: pgmap v40: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:20:46.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:20:45 vm08 ceph-mon[49535]: pgmap v40: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 
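[editor's note] The alertmanager failures that recur through this span are all the same: the ceph-dashboard webhook receiver targets https://host.containers.internal:8443/api/prometheus_receiver, and that name does not resolve against 192.168.123.1. A short diagnostic sketch, assuming shell access to the host running alertmanager.a:

    # Does the webhook target resolve from the host? (In this run it does not.)
    getent hosts host.containers.internal || echo "no such host"
    # The receiver URL is derived from the mgr dashboard module; list the endpoints the mgr advertises.
    ceph mgr services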
2026-03-10T13:20:47.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:20:46 vm08 ceph-mon[49535]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:20:47.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:20:46 vm00 ceph-mon[47364]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:20:47.253 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:20:46 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:20:46.960Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:20:47.253 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:20:46 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:20:46.961Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:20:47.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:20:46 vm00 ceph-mon[51670]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:20:48.192 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:20:47 vm00 ceph-mon[47364]: pgmap v41: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:20:48.192 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:20:47 vm00 ceph-mon[51670]: pgmap v41: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:20:48.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:20:47 vm08 ceph-mon[49535]: pgmap v41: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:20:50.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:20:49 vm00 ceph-mon[47364]: from='client.24848 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:20:50.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:20:49 vm00 ceph-mon[47364]: pgmap v42: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:20:50.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:20:49 vm00 ceph-mon[51670]: from='client.24848 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:20:50.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:20:49 vm00 ceph-mon[51670]: pgmap v42: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:20:50.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:20:49 vm08 ceph-mon[49535]: from='client.24848 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:20:50.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:20:49 vm08 ceph-mon[49535]: pgmap v42: 161 pgs: 161 active+clean; 457 KiB 
data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:20:52.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:20:51 vm00 ceph-mon[47364]: pgmap v43: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:20:52.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:20:51 vm00 ceph-mon[51670]: pgmap v43: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:20:52.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:20:51 vm08 ceph-mon[49535]: pgmap v43: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:20:54.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:20:53 vm08 ceph-mon[49535]: pgmap v44: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:20:54.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:20:53 vm00 ceph-mon[47364]: pgmap v44: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:20:54.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:20:53 vm00 ceph-mon[51670]: pgmap v44: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:20:56.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:20:55 vm00 ceph-mon[47364]: pgmap v45: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:20:56.003 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:20:55 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:20:55] "GET /metrics HTTP/1.1" 200 37539 "" "Prometheus/2.51.0" 2026-03-10T13:20:56.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:20:55 vm00 ceph-mon[51670]: pgmap v45: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:20:56.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:20:55 vm08 ceph-mon[49535]: pgmap v45: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:20:57.252 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:20:56 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:20:56.961Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:20:57.252 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:20:56 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:20:56.962Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:20:58.202 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:20:57 vm00 ceph-mon[47364]: pgmap v46: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:20:58.202 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:20:57 vm00 ceph-mon[51670]: pgmap v46: 161 pgs: 161 
active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:20:58.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:20:57 vm08 ceph-mon[49535]: pgmap v46: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:21:00.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:20:59 vm00 ceph-mon[47364]: from='client.24848 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:21:00.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:20:59 vm00 ceph-mon[47364]: pgmap v47: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:21:00.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:20:59 vm00 ceph-mon[51670]: from='client.24848 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:21:00.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:20:59 vm00 ceph-mon[51670]: pgmap v47: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:21:00.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:20:59 vm08 ceph-mon[49535]: from='client.24848 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:21:00.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:20:59 vm08 ceph-mon[49535]: pgmap v47: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:21:02.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:21:01 vm00 ceph-mon[47364]: pgmap v48: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:21:02.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:21:01 vm00 ceph-mon[47364]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:21:02.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:21:01 vm00 ceph-mon[51670]: pgmap v48: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:21:02.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:21:01 vm00 ceph-mon[51670]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:21:02.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:21:01 vm08 ceph-mon[49535]: pgmap v48: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:21:02.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:21:01 vm08 ceph-mon[49535]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:21:04.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:21:03 vm08 ceph-mon[49535]: pgmap v49: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:21:04.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:21:03 vm00 ceph-mon[47364]: pgmap v49: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:21:04.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:21:03 vm00 ceph-mon[51670]: pgmap v49: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 
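[editor's note] From here the cluster is idle: the pgmap lines repeat 161 active+clean PGs, the iSCSI gateway re-registers via "service status" roughly every ten seconds, and the mgr refreshes the OSD blocklist periodically. A sketch of watching the same steady state interactively (all three commands appear in the log above or are standard Ceph CLI):

    # One-line PG summary, equivalent to the pgmap entries logged by the mons.
    ceph pg stat
    # Service map entries; the iSCSI gateway appears here through its periodic "service status" calls.
    ceph service status
    # The blocklist the mgr polls in the entries above.
    ceph osd blocklist ls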
2026-03-10T13:21:06.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:21:05 vm00 ceph-mon[47364]: pgmap v50: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:21:06.002 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:21:05 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:21:05] "GET /metrics HTTP/1.1" 200 37537 "" "Prometheus/2.51.0" 2026-03-10T13:21:06.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:21:05 vm00 ceph-mon[51670]: pgmap v50: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:21:06.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:21:05 vm08 ceph-mon[49535]: pgmap v50: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:21:07.252 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:21:06 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:21:06.961Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:21:07.252 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:21:06 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:21:06.962Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:21:08.208 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:21:07 vm00 ceph-mon[47364]: pgmap v51: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:21:08.208 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:21:07 vm00 ceph-mon[51670]: pgmap v51: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:21:08.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:21:07 vm08 ceph-mon[49535]: pgmap v51: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:21:10.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:21:09 vm00 ceph-mon[47364]: from='client.24848 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:21:10.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:21:09 vm00 ceph-mon[47364]: pgmap v52: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:21:10.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:21:09 vm00 ceph-mon[51670]: from='client.24848 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:21:10.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:21:09 vm00 ceph-mon[51670]: pgmap v52: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:21:10.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:21:09 vm08 ceph-mon[49535]: from='client.24848 -' entity='client.iscsi.foo.vm00.dezodo' 
cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:21:10.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:21:09 vm08 ceph-mon[49535]: pgmap v52: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:21:12.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:21:11 vm08 ceph-mon[49535]: pgmap v53: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:21:12.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:21:11 vm00 ceph-mon[47364]: pgmap v53: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:21:12.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:21:11 vm00 ceph-mon[51670]: pgmap v53: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:21:14.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:21:13 vm08 ceph-mon[49535]: pgmap v54: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:21:14.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:21:13 vm00 ceph-mon[47364]: pgmap v54: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:21:14.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:21:13 vm00 ceph-mon[51670]: pgmap v54: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:21:16.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:21:15 vm00 ceph-mon[47364]: pgmap v55: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:21:16.003 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:21:15 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:21:15] "GET /metrics HTTP/1.1" 200 37537 "" "Prometheus/2.51.0" 2026-03-10T13:21:16.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:21:15 vm00 ceph-mon[51670]: pgmap v55: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:21:16.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:21:15 vm08 ceph-mon[49535]: pgmap v55: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:21:17.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:21:16 vm00 ceph-mon[47364]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:21:17.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:21:16 vm00 ceph-mon[51670]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:21:17.252 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:21:16 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:21:16.962Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:21:17.253 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:21:16 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: 
ts=2026-03-10T13:21:16.963Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:21:17.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:21:16 vm08 ceph-mon[49535]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:21:18.209 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:21:17 vm00 ceph-mon[47364]: pgmap v56: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:21:18.209 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:21:17 vm00 ceph-mon[51670]: pgmap v56: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:21:18.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:21:17 vm08 ceph-mon[49535]: pgmap v56: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:21:20.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:21:19 vm00 ceph-mon[47364]: from='client.24848 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:21:20.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:21:19 vm00 ceph-mon[47364]: pgmap v57: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:21:20.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:21:19 vm00 ceph-mon[51670]: from='client.24848 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:21:20.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:21:19 vm00 ceph-mon[51670]: pgmap v57: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:21:20.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:21:19 vm08 ceph-mon[49535]: from='client.24848 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:21:20.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:21:19 vm08 ceph-mon[49535]: pgmap v57: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:21:22.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:21:21 vm08 ceph-mon[49535]: pgmap v58: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:21:22.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:21:21 vm00 ceph-mon[47364]: pgmap v58: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:21:22.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:21:21 vm00 ceph-mon[51670]: pgmap v58: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:21:24.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:21:23 vm08 ceph-mon[49535]: pgmap v59: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:21:24.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:21:23 vm00 ceph-mon[47364]: pgmap v59: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 
op/s 2026-03-10T13:21:24.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:21:23 vm00 ceph-mon[51670]: pgmap v59: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:21:26.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:21:25 vm00 ceph-mon[47364]: pgmap v60: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:21:26.002 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:21:25 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:21:25] "GET /metrics HTTP/1.1" 200 37537 "" "Prometheus/2.51.0" 2026-03-10T13:21:26.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:21:25 vm00 ceph-mon[51670]: pgmap v60: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:21:26.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:21:25 vm08 ceph-mon[49535]: pgmap v60: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:21:27.252 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:21:26 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:21:26.963Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:21:27.253 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:21:26 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:21:26.964Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:21:28.220 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:21:27 vm00 ceph-mon[47364]: pgmap v61: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:21:28.220 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:21:27 vm00 ceph-mon[51670]: pgmap v61: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:21:28.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:21:27 vm08 ceph-mon[49535]: pgmap v61: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:21:30.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:21:29 vm00 ceph-mon[47364]: from='client.24848 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:21:30.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:21:29 vm00 ceph-mon[47364]: pgmap v62: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:21:30.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:21:29 vm00 ceph-mon[51670]: from='client.24848 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:21:30.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:21:29 vm00 ceph-mon[51670]: pgmap v62: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 
160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:21:30.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:21:29 vm08 ceph-mon[49535]: from='client.24848 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:21:30.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:21:29 vm08 ceph-mon[49535]: pgmap v62: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:21:32.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:21:32 vm08 ceph-mon[49535]: pgmap v63: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:21:32.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:21:32 vm08 ceph-mon[49535]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:21:32.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:21:32 vm00 ceph-mon[47364]: pgmap v63: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:21:32.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:21:32 vm00 ceph-mon[47364]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:21:32.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:21:32 vm00 ceph-mon[51670]: pgmap v63: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:21:32.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:21:32 vm00 ceph-mon[51670]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:21:34.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:21:34 vm08 ceph-mon[49535]: pgmap v64: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:21:34.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:21:34 vm00 ceph-mon[47364]: pgmap v64: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:21:34.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:21:34 vm00 ceph-mon[51670]: pgmap v64: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:21:36.002 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:21:35 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:21:35] "GET /metrics HTTP/1.1" 200 37533 "" "Prometheus/2.51.0" 2026-03-10T13:21:36.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:21:36 vm00 ceph-mon[47364]: pgmap v65: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:21:36.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:21:36 vm00 ceph-mon[51670]: pgmap v65: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:21:36.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:21:36 vm08 ceph-mon[49535]: pgmap v65: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:21:37.252 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:21:36 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:21:36.964Z caller=dispatch.go:352 level=error component=dispatcher 
msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:21:37.253 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:21:36 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:21:36.965Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:21:38.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:21:38 vm00 ceph-mon[47364]: pgmap v66: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:21:38.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:21:38 vm00 ceph-mon[51670]: pgmap v66: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:21:38.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:21:38 vm08 ceph-mon[49535]: pgmap v66: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:21:40.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:21:39 vm00 ceph-mon[47364]: from='client.24848 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:21:40.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:21:39 vm00 ceph-mon[47364]: pgmap v67: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:21:40.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:21:39 vm00 ceph-mon[51670]: from='client.24848 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:21:40.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:21:39 vm00 ceph-mon[51670]: pgmap v67: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:21:40.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:21:39 vm08 ceph-mon[49535]: from='client.24848 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:21:40.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:21:39 vm08 ceph-mon[49535]: pgmap v67: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:21:41.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:21:40 vm00 ceph-mon[47364]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:21:41.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:21:40 vm00 ceph-mon[47364]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:21:41.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:21:40 vm00 ceph-mon[47364]: from='mgr.15027 ' entity='mgr.y' 2026-03-10T13:21:41.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:21:40 vm00 ceph-mon[51670]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:21:41.253 
INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:21:40 vm00 ceph-mon[51670]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:21:41.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:21:40 vm00 ceph-mon[51670]: from='mgr.15027 ' entity='mgr.y' 2026-03-10T13:21:41.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:21:40 vm08 ceph-mon[49535]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:21:41.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:21:40 vm08 ceph-mon[49535]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:21:41.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:21:40 vm08 ceph-mon[49535]: from='mgr.15027 ' entity='mgr.y' 2026-03-10T13:21:42.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:21:41 vm00 ceph-mon[47364]: pgmap v68: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:21:42.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:21:41 vm00 ceph-mon[51670]: pgmap v68: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:21:42.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:21:41 vm08 ceph-mon[49535]: pgmap v68: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:21:44.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:21:43 vm00 ceph-mon[47364]: pgmap v69: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:21:44.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:21:43 vm00 ceph-mon[51670]: pgmap v69: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:21:44.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:21:43 vm08 ceph-mon[49535]: pgmap v69: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:21:46.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:21:45 vm00 ceph-mon[47364]: pgmap v70: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:21:46.002 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:21:45 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:21:45] "GET /metrics HTTP/1.1" 200 37533 "" "Prometheus/2.51.0" 2026-03-10T13:21:46.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:21:45 vm00 ceph-mon[51670]: pgmap v70: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:21:46.071 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:21:45 vm08 ceph-mon[49535]: pgmap v70: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:21:47.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:21:46 vm00 ceph-mon[47364]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:21:47.252 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:21:46 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:21:46.965Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts 
failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:21:47.252 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:21:46 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:21:46.966Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:21:47.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:21:46 vm00 ceph-mon[51670]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:21:47.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:21:46 vm08 ceph-mon[49535]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:21:48.237 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:21:47 vm00 ceph-mon[47364]: pgmap v71: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:21:48.237 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:21:47 vm00 ceph-mon[51670]: pgmap v71: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:21:48.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:21:47 vm08 ceph-mon[49535]: pgmap v71: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:21:50.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:21:49 vm00 ceph-mon[47364]: from='client.24848 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:21:50.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:21:49 vm00 ceph-mon[47364]: pgmap v72: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:21:50.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:21:49 vm00 ceph-mon[51670]: from='client.24848 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:21:50.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:21:49 vm00 ceph-mon[51670]: pgmap v72: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:21:50.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:21:49 vm08 ceph-mon[49535]: from='client.24848 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:21:50.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:21:49 vm08 ceph-mon[49535]: pgmap v72: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:21:52.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:21:51 vm08 ceph-mon[49535]: pgmap v73: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:21:52.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:21:51 vm00 ceph-mon[47364]: pgmap v73: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 
2026-03-10T13:21:52.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:21:51 vm00 ceph-mon[51670]: pgmap v73: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:21:54.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:21:53 vm08 ceph-mon[49535]: pgmap v74: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:21:54.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:21:53 vm00 ceph-mon[47364]: pgmap v74: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:21:54.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:21:53 vm00 ceph-mon[51670]: pgmap v74: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:21:56.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:21:55 vm00 ceph-mon[47364]: pgmap v75: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:21:56.002 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:21:55 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:21:55] "GET /metrics HTTP/1.1" 200 37548 "" "Prometheus/2.51.0" 2026-03-10T13:21:56.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:21:55 vm00 ceph-mon[51670]: pgmap v75: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:21:56.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:21:55 vm08 ceph-mon[49535]: pgmap v75: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:21:57.252 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:21:56 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:21:56.966Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:21:57.252 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:21:56 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:21:56.967Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:21:58.243 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:21:57 vm00 ceph-mon[47364]: pgmap v76: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:21:58.243 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:21:57 vm00 ceph-mon[51670]: pgmap v76: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:21:58.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:21:57 vm08 ceph-mon[49535]: pgmap v76: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:22:00.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:21:59 vm00 ceph-mon[47364]: from='client.24848 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", 
"format": "json"}]: dispatch 2026-03-10T13:22:00.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:21:59 vm00 ceph-mon[47364]: pgmap v77: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:22:00.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:21:59 vm00 ceph-mon[51670]: from='client.24848 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:22:00.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:21:59 vm00 ceph-mon[51670]: pgmap v77: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:22:00.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:21:59 vm08 ceph-mon[49535]: from='client.24848 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:22:00.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:21:59 vm08 ceph-mon[49535]: pgmap v77: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:22:02.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:01 vm00 ceph-mon[47364]: pgmap v78: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:22:02.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:01 vm00 ceph-mon[47364]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:22:02.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:01 vm00 ceph-mon[51670]: pgmap v78: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:22:02.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:01 vm00 ceph-mon[51670]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:22:02.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:01 vm08 ceph-mon[49535]: pgmap v78: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:22:02.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:01 vm08 ceph-mon[49535]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:22:04.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:03 vm00 ceph-mon[47364]: pgmap v79: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:22:04.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:03 vm00 ceph-mon[51670]: pgmap v79: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:22:04.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:03 vm08 ceph-mon[49535]: pgmap v79: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:22:06.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:05 vm00 ceph-mon[47364]: pgmap v80: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:22:06.002 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:22:05 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:22:05] "GET /metrics HTTP/1.1" 200 37544 "" "Prometheus/2.51.0" 2026-03-10T13:22:06.003 
INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:05 vm00 ceph-mon[51670]: pgmap v80: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:22:06.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:05 vm08 ceph-mon[49535]: pgmap v80: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:22:07.253 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:22:06 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:22:06.966Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:22:07.253 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:22:06 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:22:06.968Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:22:08.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:07 vm00 ceph-mon[47364]: pgmap v81: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:22:08.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:07 vm00 ceph-mon[51670]: pgmap v81: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:22:08.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:07 vm08 ceph-mon[49535]: pgmap v81: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:22:10.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:09 vm00 ceph-mon[47364]: from='client.24848 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:22:10.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:09 vm00 ceph-mon[47364]: pgmap v82: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:22:10.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:09 vm00 ceph-mon[51670]: from='client.24848 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:22:10.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:09 vm00 ceph-mon[51670]: pgmap v82: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:22:10.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:09 vm08 ceph-mon[49535]: from='client.24848 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:22:10.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:09 vm08 ceph-mon[49535]: pgmap v82: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:22:12.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:11 vm00 ceph-mon[47364]: pgmap v83: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 
2026-03-10T13:22:12.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:11 vm00 ceph-mon[51670]: pgmap v83: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:22:12.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:11 vm08 ceph-mon[49535]: pgmap v83: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:22:14.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:13 vm00 ceph-mon[47364]: pgmap v84: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:22:14.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:13 vm00 ceph-mon[51670]: pgmap v84: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:22:14.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:13 vm08 ceph-mon[49535]: pgmap v84: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:22:15.961 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:15 vm00 ceph-mon[47364]: pgmap v85: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:22:15.961 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:22:15 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:22:15] "GET /metrics HTTP/1.1" 200 37544 "" "Prometheus/2.51.0" 2026-03-10T13:22:16.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:15 vm00 ceph-mon[51670]: pgmap v85: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:22:16.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:15 vm08 ceph-mon[49535]: pgmap v85: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:22:17.154 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:16 vm00 ceph-mon[47364]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:22:17.154 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:22:16 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:22:16.968Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:22:17.154 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:22:16 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:22:16.968Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:22:17.154 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:16 vm00 ceph-mon[51670]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:22:17.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:16 vm08 ceph-mon[49535]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' 
cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:22:18.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:17 vm00 ceph-mon[47364]: pgmap v86: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:22:18.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:17 vm00 ceph-mon[51670]: pgmap v86: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:22:18.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:17 vm08 ceph-mon[49535]: pgmap v86: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:22:20.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:19 vm00 ceph-mon[47364]: from='client.24848 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:22:20.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:19 vm00 ceph-mon[47364]: pgmap v87: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:22:20.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:19 vm00 ceph-mon[51670]: from='client.24848 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:22:20.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:19 vm00 ceph-mon[51670]: pgmap v87: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:22:20.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:19 vm08 ceph-mon[49535]: from='client.24848 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:22:20.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:19 vm08 ceph-mon[49535]: pgmap v87: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:22:22.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:21 vm00 ceph-mon[47364]: pgmap v88: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:22:22.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:21 vm00 ceph-mon[51670]: pgmap v88: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:22:22.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:21 vm08 ceph-mon[49535]: pgmap v88: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:22:24.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:23 vm00 ceph-mon[47364]: pgmap v89: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:22:24.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:23 vm00 ceph-mon[51670]: pgmap v89: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:22:24.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:23 vm08 ceph-mon[49535]: pgmap v89: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:22:26.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:25 vm00 ceph-mon[47364]: pgmap v90: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:22:26.003 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 
13:22:25 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:22:25] "GET /metrics HTTP/1.1" 200 37547 "" "Prometheus/2.51.0" 2026-03-10T13:22:26.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:25 vm00 ceph-mon[51670]: pgmap v90: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:22:26.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:25 vm08 ceph-mon[49535]: pgmap v90: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:22:27.252 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:22:26 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:22:26.968Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:22:27.252 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:22:26 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:22:26.969Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:22:28.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:27 vm00 ceph-mon[47364]: pgmap v91: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:22:28.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:27 vm00 ceph-mon[51670]: pgmap v91: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:22:28.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:27 vm08 ceph-mon[49535]: pgmap v91: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:22:30.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:29 vm00 ceph-mon[47364]: from='client.24848 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:22:30.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:29 vm00 ceph-mon[47364]: pgmap v92: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:22:30.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:29 vm00 ceph-mon[51670]: from='client.24848 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:22:30.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:29 vm00 ceph-mon[51670]: pgmap v92: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:22:30.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:29 vm08 ceph-mon[49535]: from='client.24848 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:22:30.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:29 vm08 ceph-mon[49535]: pgmap v92: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 
2026-03-10T13:22:31.152 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch ps' 2026-03-10T13:22:31.740 INFO:teuthology.orchestra.run.vm00.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T13:22:31.740 INFO:teuthology.orchestra.run.vm00.stdout:alertmanager.a vm00 *:9093,9094 running (8m) 2m ago 15m 23.7M - 0.25.0 c8568f914cd2 12fde3cf83cb 2026-03-10T13:22:31.740 INFO:teuthology.orchestra.run.vm00.stdout:grafana.a vm08 *:3000 running (8m) 2m ago 15m 47.2M - dad864ee21e9 263cac442a99 2026-03-10T13:22:31.740 INFO:teuthology.orchestra.run.vm00.stdout:iscsi.foo.vm00.dezodo vm00 running (2m) 2m ago 15m 53.6M - 3.5 e1d6a67b021e c7c9a300ffef 2026-03-10T13:22:31.740 INFO:teuthology.orchestra.run.vm00.stdout:mgr.x vm08 *:8443,9283 running (11m) 2m ago 17m 485M - 19.2.3-678-ge911bdeb 654f31e6858e 62b908c184a8 2026-03-10T13:22:31.740 INFO:teuthology.orchestra.run.vm00.stdout:mgr.y vm00 *:8443,9283,8765 running (3m) 2m ago 18m 545M - 19.2.3-678-ge911bdeb 654f31e6858e 5bc576d4d32b 2026-03-10T13:22:31.740 INFO:teuthology.orchestra.run.vm00.stdout:mon.a vm00 running (18m) 2m ago 18m 67.2M 2048M 17.2.0 e1d6a67b021e f0e3f322471c 2026-03-10T13:22:31.740 INFO:teuthology.orchestra.run.vm00.stdout:mon.b vm08 running (17m) 2m ago 17m 49.8M 2048M 17.2.0 e1d6a67b021e d3c1458bc898 2026-03-10T13:22:31.740 INFO:teuthology.orchestra.run.vm00.stdout:mon.c vm00 running (17m) 2m ago 17m 51.3M 2048M 17.2.0 e1d6a67b021e d00b7fd44c23 2026-03-10T13:22:31.740 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.a vm00 *:9100 running (9m) 2m ago 15m 10.4M - 1.7.0 72c9c2088986 bcf883401619 2026-03-10T13:22:31.740 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.b vm08 *:9100 running (8m) 2m ago 15m 9.94M - 1.7.0 72c9c2088986 4ac83f03f818 2026-03-10T13:22:31.740 INFO:teuthology.orchestra.run.vm00.stdout:osd.0 vm00 running (17m) 2m ago 17m 51.9M 4096M 17.2.0 e1d6a67b021e 2919c7073fa7 2026-03-10T13:22:31.740 INFO:teuthology.orchestra.run.vm00.stdout:osd.1 vm00 running (16m) 2m ago 16m 57.4M 4096M 17.2.0 e1d6a67b021e 647927dc41ea 2026-03-10T13:22:31.740 INFO:teuthology.orchestra.run.vm00.stdout:osd.2 vm00 running (16m) 2m ago 16m 54.3M 4096M 17.2.0 e1d6a67b021e 1e417e82c2b9 2026-03-10T13:22:31.740 INFO:teuthology.orchestra.run.vm00.stdout:osd.3 vm00 running (16m) 2m ago 16m 52.4M 4096M 17.2.0 e1d6a67b021e 98bb6d7ea69f 2026-03-10T13:22:31.740 INFO:teuthology.orchestra.run.vm00.stdout:osd.4 vm08 running (16m) 2m ago 16m 54.2M 4096M 17.2.0 e1d6a67b021e e349440ca776 2026-03-10T13:22:31.740 INFO:teuthology.orchestra.run.vm00.stdout:osd.5 vm08 running (16m) 2m ago 16m 55.4M 4096M 17.2.0 e1d6a67b021e 9a6a2a67cc99 2026-03-10T13:22:31.740 INFO:teuthology.orchestra.run.vm00.stdout:osd.6 vm08 running (16m) 2m ago 16m 51.0M 4096M 17.2.0 e1d6a67b021e c27676916d52 2026-03-10T13:22:31.740 INFO:teuthology.orchestra.run.vm00.stdout:osd.7 vm08 running (16m) 2m ago 16m 52.7M 4096M 17.2.0 e1d6a67b021e bf67951990a5 2026-03-10T13:22:31.740 INFO:teuthology.orchestra.run.vm00.stdout:prometheus.a vm08 *:9095 running (2m) 2m ago 15m 46.6M - 2.51.0 1d3b7f56885b 3b07f384b4b6 2026-03-10T13:22:31.740 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm00.tvlvzo vm00 *:8000 running (15m) 2m ago 15m 94.7M - 17.2.0 e1d6a67b021e be24eac16807 2026-03-10T13:22:31.740 
INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm08.ljayps vm08 *:8000 running (15m) 2m ago 15m 92.1M - 17.2.0 e1d6a67b021e 67b525427823 2026-03-10T13:22:31.880 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph versions' 2026-03-10T13:22:32.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:31 vm00 ceph-mon[47364]: pgmap v93: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:22:32.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:31 vm00 ceph-mon[47364]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:22:32.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:31 vm00 ceph-mon[51670]: pgmap v93: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:22:32.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:31 vm00 ceph-mon[51670]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:22:32.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:31 vm08 ceph-mon[49535]: pgmap v93: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:22:32.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:31 vm08 ceph-mon[49535]: from='mgr.15027 192.168.123.100:0/731315060' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:22:32.631 INFO:teuthology.orchestra.run.vm00.stdout:{ 2026-03-10T13:22:32.631 INFO:teuthology.orchestra.run.vm00.stdout: "mon": { 2026-03-10T13:22:32.631 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 3 2026-03-10T13:22:32.631 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T13:22:32.631 INFO:teuthology.orchestra.run.vm00.stdout: "mgr": { 2026-03-10T13:22:32.631 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-10T13:22:32.631 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T13:22:32.631 INFO:teuthology.orchestra.run.vm00.stdout: "osd": { 2026-03-10T13:22:32.631 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 8 2026-03-10T13:22:32.631 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T13:22:32.631 INFO:teuthology.orchestra.run.vm00.stdout: "mds": {}, 2026-03-10T13:22:32.631 INFO:teuthology.orchestra.run.vm00.stdout: "rgw": { 2026-03-10T13:22:32.631 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 2 2026-03-10T13:22:32.631 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T13:22:32.631 INFO:teuthology.orchestra.run.vm00.stdout: "overall": { 2026-03-10T13:22:32.631 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 13, 2026-03-10T13:22:32.631 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-10T13:22:32.631 
INFO:teuthology.orchestra.run.vm00.stdout: } 2026-03-10T13:22:32.631 INFO:teuthology.orchestra.run.vm00.stdout:} 2026-03-10T13:22:32.704 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph -s' 2026-03-10T13:22:32.887 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:32 vm00 ceph-mon[51670]: from='client.15096 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:22:32.887 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:32 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/255739050' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:22:32.889 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:32 vm00 ceph-mon[47364]: from='client.15096 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:22:32.889 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:32 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/255739050' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:22:33.251 INFO:teuthology.orchestra.run.vm00.stdout: cluster: 2026-03-10T13:22:33.251 INFO:teuthology.orchestra.run.vm00.stdout: id: 98a3dada-1c81-11f1-89c9-d57c120f78d5 2026-03-10T13:22:33.251 INFO:teuthology.orchestra.run.vm00.stdout: health: HEALTH_OK 2026-03-10T13:22:33.251 INFO:teuthology.orchestra.run.vm00.stdout: 2026-03-10T13:22:33.251 INFO:teuthology.orchestra.run.vm00.stdout: services: 2026-03-10T13:22:33.251 INFO:teuthology.orchestra.run.vm00.stdout: mon: 3 daemons, quorum a,c,b (age 17m) 2026-03-10T13:22:33.251 INFO:teuthology.orchestra.run.vm00.stdout: mgr: y(active, since 3m), standbys: x 2026-03-10T13:22:33.251 INFO:teuthology.orchestra.run.vm00.stdout: osd: 8 osds: 8 up (since 15m), 8 in (since 16m) 2026-03-10T13:22:33.251 INFO:teuthology.orchestra.run.vm00.stdout: rgw: 2 daemons active (2 hosts, 1 zones) 2026-03-10T13:22:33.251 INFO:teuthology.orchestra.run.vm00.stdout: 2026-03-10T13:22:33.251 INFO:teuthology.orchestra.run.vm00.stdout: data: 2026-03-10T13:22:33.251 INFO:teuthology.orchestra.run.vm00.stdout: pools: 6 pools, 161 pgs 2026-03-10T13:22:33.251 INFO:teuthology.orchestra.run.vm00.stdout: objects: 209 objects, 457 KiB 2026-03-10T13:22:33.251 INFO:teuthology.orchestra.run.vm00.stdout: usage: 95 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:22:33.251 INFO:teuthology.orchestra.run.vm00.stdout: pgs: 161 active+clean 2026-03-10T13:22:33.251 INFO:teuthology.orchestra.run.vm00.stdout: 2026-03-10T13:22:33.251 INFO:teuthology.orchestra.run.vm00.stdout: io: 2026-03-10T13:22:33.251 INFO:teuthology.orchestra.run.vm00.stdout: client: 1.2 KiB/s rd, 1 op/s rd, 0 op/s wr 2026-03-10T13:22:33.251 INFO:teuthology.orchestra.run.vm00.stdout: 2026-03-10T13:22:33.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:32 vm08 ceph-mon[49535]: from='client.15096 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:22:33.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:32 vm08 ceph-mon[49535]: from='client.? 
192.168.123.100:0/255739050' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:22:33.309 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph health detail' 2026-03-10T13:22:33.957 INFO:teuthology.orchestra.run.vm00.stdout:HEALTH_OK 2026-03-10T13:22:34.022 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph versions | jq -e '"'"'.mgr | length == 1'"'"'' 2026-03-10T13:22:34.127 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:33 vm00 ceph-mon[47364]: pgmap v94: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:22:34.127 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:33 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/2374797857' entity='client.admin' cmd=[{"prefix": "status"}]: dispatch 2026-03-10T13:22:34.127 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:33 vm00 ceph-mon[51670]: pgmap v94: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:22:34.127 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:33 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/2374797857' entity='client.admin' cmd=[{"prefix": "status"}]: dispatch 2026-03-10T13:22:34.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:33 vm08 ceph-mon[49535]: pgmap v94: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:22:34.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:33 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/2374797857' entity='client.admin' cmd=[{"prefix": "status"}]: dispatch 2026-03-10T13:22:34.617 INFO:teuthology.orchestra.run.vm00.stdout:true 2026-03-10T13:22:34.685 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph mgr fail' 2026-03-10T13:22:34.908 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:34 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/265531689' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T13:22:34.908 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:34 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/1725548905' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:22:34.908 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:34 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/265531689' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T13:22:34.908 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:34 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/1725548905' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:22:35.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:34 vm08 ceph-mon[49535]: from='client.? 
192.168.123.100:0/265531689' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T13:22:35.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:34 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/1725548905' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:22:35.946 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:35 vm00 ceph-mon[47364]: pgmap v95: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:22:35.946 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:35 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/3565463658' entity='client.admin' cmd=[{"prefix": "mgr fail"}]: dispatch 2026-03-10T13:22:35.946 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:35 vm00 ceph-mon[47364]: from='client.? ' entity='client.admin' cmd=[{"prefix": "mgr fail"}]: dispatch 2026-03-10T13:22:35.946 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:35 vm00 ceph-mon[47364]: osdmap e87: 8 total, 8 up, 8 in 2026-03-10T13:22:35.946 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:35 vm00 ceph-mon[47364]: Standby manager daemon y started 2026-03-10T13:22:35.946 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:22:35 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:22:35] "GET /metrics HTTP/1.1" 200 37547 "" "Prometheus/2.51.0" 2026-03-10T13:22:35.946 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:22:35 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:22:35.901+0000 7fea44ad4640 -1 mgr handle_mgr_map I was active but no longer am 2026-03-10T13:22:35.946 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:35 vm00 ceph-mon[51670]: pgmap v95: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:22:35.947 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:35 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/3565463658' entity='client.admin' cmd=[{"prefix": "mgr fail"}]: dispatch 2026-03-10T13:22:35.947 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:35 vm00 ceph-mon[51670]: from='client.? 
' entity='client.admin' cmd=[{"prefix": "mgr fail"}]: dispatch 2026-03-10T13:22:35.947 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:35 vm00 ceph-mon[51670]: osdmap e87: 8 total, 8 up, 8 in 2026-03-10T13:22:35.947 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:35 vm00 ceph-mon[51670]: Standby manager daemon y started 2026-03-10T13:22:35.977 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'sleep 180' 2026-03-10T13:22:36.155 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:22:35 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: [10/Mar/2026:13:22:35] ENGINE Bus STOPPING 2026-03-10T13:22:36.155 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:22:36 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: [10/Mar/2026:13:22:36] ENGINE HTTP Server cherrypy._cpwsgi_server.CPWSGIServer(('::', 9283)) shut down 2026-03-10T13:22:36.155 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:22:36 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: [10/Mar/2026:13:22:36] ENGINE Bus STOPPED 2026-03-10T13:22:36.155 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:35 vm08 ceph-mon[49535]: pgmap v95: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:22:36.155 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:35 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/3565463658' entity='client.admin' cmd=[{"prefix": "mgr fail"}]: dispatch 2026-03-10T13:22:36.155 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:35 vm08 ceph-mon[49535]: from='client.? 
' entity='client.admin' cmd=[{"prefix": "mgr fail"}]: dispatch 2026-03-10T13:22:36.155 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:35 vm08 ceph-mon[49535]: osdmap e87: 8 total, 8 up, 8 in 2026-03-10T13:22:36.155 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:35 vm08 ceph-mon[49535]: Standby manager daemon y started 2026-03-10T13:22:36.198 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:22:35 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: ignoring --setuser ceph since I am not root 2026-03-10T13:22:36.198 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:22:35 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: ignoring --setgroup ceph since I am not root 2026-03-10T13:22:36.198 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:22:36 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:22:36.054+0000 7ff1cfcf2140 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member 2026-03-10T13:22:36.198 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:22:36 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:22:36.099+0000 7ff1cfcf2140 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member 2026-03-10T13:22:36.414 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:22:36 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: [10/Mar/2026:13:22:36] ENGINE Bus STARTING 2026-03-10T13:22:36.414 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:22:36 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: [10/Mar/2026:13:22:36] ENGINE Serving on http://:::9283 2026-03-10T13:22:36.414 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:22:36 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: [10/Mar/2026:13:22:36] ENGINE Bus STARTED 2026-03-10T13:22:36.753 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:22:36 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:22:36.579+0000 7ff1cfcf2140 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-10T13:22:37.112 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:36 vm00 ceph-mon[51670]: from='client.? 
' entity='client.admin' cmd='[{"prefix": "mgr fail"}]': finished 2026-03-10T13:22:37.112 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:36 vm00 ceph-mon[51670]: mgrmap e31: x(active, starting, since 0.668856s), standbys: y 2026-03-10T13:22:37.112 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:36 vm00 ceph-mon[51670]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T13:22:37.112 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:36 vm00 ceph-mon[51670]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T13:22:37.112 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:36 vm00 ceph-mon[51670]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T13:22:37.112 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:36 vm00 ceph-mon[51670]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-10T13:22:37.112 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:36 vm00 ceph-mon[51670]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-10T13:22:37.113 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:36 vm00 ceph-mon[51670]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T13:22:37.113 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:36 vm00 ceph-mon[51670]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T13:22:37.113 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:36 vm00 ceph-mon[51670]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T13:22:37.113 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:36 vm00 ceph-mon[51670]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T13:22:37.113 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:36 vm00 ceph-mon[51670]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T13:22:37.113 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:36 vm00 ceph-mon[51670]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T13:22:37.113 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:36 vm00 ceph-mon[51670]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T13:22:37.113 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:36 vm00 ceph-mon[51670]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T13:22:37.113 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:36 vm00 ceph-mon[51670]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-10T13:22:37.113 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:36 vm00 ceph-mon[51670]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-10T13:22:37.113 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:36 vm00 ceph-mon[51670]: 
from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-10T13:22:37.113 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:36 vm00 ceph-mon[51670]: Manager daemon x is now available 2026-03-10T13:22:37.113 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:36 vm00 ceph-mon[51670]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:22:37.113 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:36 vm00 ceph-mon[51670]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/mirror_snapshot_schedule"}]: dispatch 2026-03-10T13:22:37.113 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:36 vm00 ceph-mon[51670]: from='mgr.24824 ' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/mirror_snapshot_schedule"}]: dispatch 2026-03-10T13:22:37.113 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:36 vm00 ceph-mon[51670]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/trash_purge_schedule"}]: dispatch 2026-03-10T13:22:37.113 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:36 vm00 ceph-mon[51670]: from='mgr.24824 ' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/trash_purge_schedule"}]: dispatch 2026-03-10T13:22:37.113 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:22:36 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:22:36.965+0000 7ff1cfcf2140 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member 2026-03-10T13:22:37.113 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:22:37 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: /lib64/python3.9/site-packages/scipy/__init__.py:73: UserWarning: NumPy was imported from a Python sub-interpreter but NumPy does not properly support sub-interpreters. This will likely work for most users but might cause hard to track down issues or subtle bugs. A common user of the rare sub-interpreter feature is wsgi which also allows single-interpreter mode. 2026-03-10T13:22:37.113 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:22:37 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: Improvements in the case of bugs are welcome, but is not on the NumPy roadmap, and full support may require significant effort to achieve. 2026-03-10T13:22:37.113 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:22:37 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: from numpy import show_config as show_numpy_config 2026-03-10T13:22:37.113 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:22:37 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:22:37.069+0000 7ff1cfcf2140 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member 2026-03-10T13:22:37.113 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:36 vm00 ceph-mon[47364]: from='client.? 
' entity='client.admin' cmd='[{"prefix": "mgr fail"}]': finished 2026-03-10T13:22:37.113 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:36 vm00 ceph-mon[47364]: mgrmap e31: x(active, starting, since 0.668856s), standbys: y 2026-03-10T13:22:37.113 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:36 vm00 ceph-mon[47364]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T13:22:37.113 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:36 vm00 ceph-mon[47364]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T13:22:37.113 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:36 vm00 ceph-mon[47364]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T13:22:37.113 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:36 vm00 ceph-mon[47364]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-10T13:22:37.113 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:36 vm00 ceph-mon[47364]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-10T13:22:37.113 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:36 vm00 ceph-mon[47364]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T13:22:37.113 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:36 vm00 ceph-mon[47364]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T13:22:37.113 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:36 vm00 ceph-mon[47364]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T13:22:37.113 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:36 vm00 ceph-mon[47364]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T13:22:37.113 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:36 vm00 ceph-mon[47364]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T13:22:37.113 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:36 vm00 ceph-mon[47364]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T13:22:37.113 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:36 vm00 ceph-mon[47364]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T13:22:37.113 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:36 vm00 ceph-mon[47364]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T13:22:37.113 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:36 vm00 ceph-mon[47364]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-10T13:22:37.114 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:36 vm00 ceph-mon[47364]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-10T13:22:37.114 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:36 vm00 ceph-mon[47364]: 
from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-10T13:22:37.114 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:36 vm00 ceph-mon[47364]: Manager daemon x is now available 2026-03-10T13:22:37.114 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:36 vm00 ceph-mon[47364]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:22:37.114 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:36 vm00 ceph-mon[47364]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/mirror_snapshot_schedule"}]: dispatch 2026-03-10T13:22:37.114 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:36 vm00 ceph-mon[47364]: from='mgr.24824 ' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/mirror_snapshot_schedule"}]: dispatch 2026-03-10T13:22:37.114 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:36 vm00 ceph-mon[47364]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/trash_purge_schedule"}]: dispatch 2026-03-10T13:22:37.114 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:36 vm00 ceph-mon[47364]: from='mgr.24824 ' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/trash_purge_schedule"}]: dispatch 2026-03-10T13:22:37.114 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:22:36 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:22:36.969Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:22:37.114 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:22:36 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:22:36.970Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:22:37.177 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:36 vm08 ceph-mon[49535]: from='client.? 
' entity='client.admin' cmd='[{"prefix": "mgr fail"}]': finished 2026-03-10T13:22:37.177 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:36 vm08 ceph-mon[49535]: mgrmap e31: x(active, starting, since 0.668856s), standbys: y 2026-03-10T13:22:37.177 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:36 vm08 ceph-mon[49535]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T13:22:37.177 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:36 vm08 ceph-mon[49535]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T13:22:37.177 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:36 vm08 ceph-mon[49535]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T13:22:37.177 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:36 vm08 ceph-mon[49535]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-10T13:22:37.177 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:36 vm08 ceph-mon[49535]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-10T13:22:37.177 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:36 vm08 ceph-mon[49535]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T13:22:37.177 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:36 vm08 ceph-mon[49535]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T13:22:37.177 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:36 vm08 ceph-mon[49535]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T13:22:37.177 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:36 vm08 ceph-mon[49535]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T13:22:37.177 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:36 vm08 ceph-mon[49535]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T13:22:37.177 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:36 vm08 ceph-mon[49535]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T13:22:37.177 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:36 vm08 ceph-mon[49535]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T13:22:37.177 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:36 vm08 ceph-mon[49535]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T13:22:37.177 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:36 vm08 ceph-mon[49535]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-10T13:22:37.177 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:36 vm08 ceph-mon[49535]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-10T13:22:37.177 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:36 vm08 ceph-mon[49535]: 
from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-10T13:22:37.177 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:36 vm08 ceph-mon[49535]: Manager daemon x is now available 2026-03-10T13:22:37.177 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:36 vm08 ceph-mon[49535]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:22:37.177 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:36 vm08 ceph-mon[49535]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/mirror_snapshot_schedule"}]: dispatch 2026-03-10T13:22:37.177 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:36 vm08 ceph-mon[49535]: from='mgr.24824 ' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/mirror_snapshot_schedule"}]: dispatch 2026-03-10T13:22:37.177 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:36 vm08 ceph-mon[49535]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/trash_purge_schedule"}]: dispatch 2026-03-10T13:22:37.177 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:36 vm08 ceph-mon[49535]: from='mgr.24824 ' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/trash_purge_schedule"}]: dispatch 2026-03-10T13:22:37.503 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:22:37 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:22:37.110+0000 7ff1cfcf2140 -1 mgr[py] Module influx has missing NOTIFY_TYPES member 2026-03-10T13:22:37.503 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:22:37 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:22:37.191+0000 7ff1cfcf2140 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member 2026-03-10T13:22:37.934 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:22:37 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:22:37.788+0000 7ff1cfcf2140 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-10T13:22:37.934 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:22:37 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:22:37.931+0000 7ff1cfcf2140 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-10T13:22:38.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:37 vm08 ceph-mon[49535]: mgrmap e32: x(active, since 1.73104s), standbys: y 2026-03-10T13:22:38.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:37 vm08 ceph-mon[49535]: pgmap v3: 161 pgs: 161 active+clean; 457 KiB data, 99 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:22:38.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:37 vm00 ceph-mon[47364]: mgrmap e32: x(active, since 1.73104s), standbys: y 2026-03-10T13:22:38.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:37 vm00 ceph-mon[47364]: pgmap v3: 161 pgs: 161 active+clean; 457 KiB data, 99 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:22:38.253 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:22:37 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:22:37.983+0000 7ff1cfcf2140 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 2026-03-10T13:22:38.253 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:22:38 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:22:38.030+0000 7ff1cfcf2140 -1 mgr[py] Module 
osd_support has missing NOTIFY_TYPES member 2026-03-10T13:22:38.253 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:22:38 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:22:38.077+0000 7ff1cfcf2140 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 2026-03-10T13:22:38.253 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:22:38 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:22:38.124+0000 7ff1cfcf2140 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-10T13:22:38.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:37 vm00 ceph-mon[51670]: mgrmap e32: x(active, since 1.73104s), standbys: y 2026-03-10T13:22:38.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:37 vm00 ceph-mon[51670]: pgmap v3: 161 pgs: 161 active+clean; 457 KiB data, 99 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:22:38.678 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:22:38 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:22:38.341+0000 7ff1cfcf2140 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member 2026-03-10T13:22:38.678 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:22:38 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:22:38.403+0000 7ff1cfcf2140 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-10T13:22:39.002 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:22:38 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:22:38.674+0000 7ff1cfcf2140 -1 mgr[py] Module rgw has missing NOTIFY_TYPES member 2026-03-10T13:22:39.283 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:22:39 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:22:39.030+0000 7ff1cfcf2140 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-10T13:22:39.284 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:22:39 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:22:39.075+0000 7ff1cfcf2140 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member 2026-03-10T13:22:39.284 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:22:39 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:22:39.122+0000 7ff1cfcf2140 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-10T13:22:39.284 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:22:39 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:22:39.239+0000 7ff1cfcf2140 -1 mgr[py] Module status has missing NOTIFY_TYPES member 2026-03-10T13:22:39.285 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:39 vm00 ceph-mon[51670]: [10/Mar/2026:13:22:37] ENGINE Bus STARTING 2026-03-10T13:22:39.285 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:39 vm00 ceph-mon[51670]: pgmap v4: 161 pgs: 161 active+clean; 457 KiB data, 99 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:22:39.285 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:39 vm00 ceph-mon[51670]: [10/Mar/2026:13:22:37] ENGINE Serving on http://192.168.123.108:8765 2026-03-10T13:22:39.285 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:39 vm00 ceph-mon[51670]: [10/Mar/2026:13:22:38] ENGINE Serving on https://192.168.123.108:7150 2026-03-10T13:22:39.285 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:39 vm00 ceph-mon[51670]: [10/Mar/2026:13:22:38] ENGINE Bus STARTED 2026-03-10T13:22:39.285 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:39 vm00 ceph-mon[51670]: [10/Mar/2026:13:22:38] ENGINE Client ('192.168.123.108', 43986) lost — peer 
dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-10T13:22:39.285 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:39 vm00 ceph-mon[51670]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:22:39.285 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:39 vm00 ceph-mon[51670]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:22:39.285 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:39 vm00 ceph-mon[51670]: from='client.24848 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:22:39.285 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:39 vm00 ceph-mon[51670]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:22:39.285 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:39 vm00 ceph-mon[51670]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:22:39.285 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:39 vm00 ceph-mon[51670]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:22:39.285 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:39 vm00 ceph-mon[51670]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:22:39.285 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:39 vm00 ceph-mon[51670]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm08", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:22:39.285 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:39 vm00 ceph-mon[51670]: from='mgr.24824 ' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm08", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:22:39.285 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:39 vm00 ceph-mon[51670]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:22:39.285 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:39 vm00 ceph-mon[51670]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:22:39.285 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:39 vm00 ceph-mon[51670]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:22:39.285 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:39 vm00 ceph-mon[51670]: from='mgr.24824 ' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:22:39.285 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:39 vm00 ceph-mon[51670]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:22:39.285 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:39 vm00 ceph-mon[51670]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:22:39.285 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:39 vm00 ceph-mon[47364]: [10/Mar/2026:13:22:37] ENGINE Bus STARTING 2026-03-10T13:22:39.285 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:39 vm00 ceph-mon[47364]: pgmap v4: 161 pgs: 161 active+clean; 457 KiB data, 99 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:22:39.285 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:39 vm00 ceph-mon[47364]: [10/Mar/2026:13:22:37] ENGINE Serving on http://192.168.123.108:8765 2026-03-10T13:22:39.285 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:39 vm00 ceph-mon[47364]: [10/Mar/2026:13:22:38] ENGINE Serving on https://192.168.123.108:7150 2026-03-10T13:22:39.285 
INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:39 vm00 ceph-mon[47364]: [10/Mar/2026:13:22:38] ENGINE Bus STARTED 2026-03-10T13:22:39.286 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:39 vm00 ceph-mon[47364]: [10/Mar/2026:13:22:38] ENGINE Client ('192.168.123.108', 43986) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-10T13:22:39.286 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:39 vm00 ceph-mon[47364]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:22:39.286 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:39 vm00 ceph-mon[47364]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:22:39.286 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:39 vm00 ceph-mon[47364]: from='client.24848 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:22:39.286 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:39 vm00 ceph-mon[47364]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:22:39.286 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:39 vm00 ceph-mon[47364]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:22:39.286 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:39 vm00 ceph-mon[47364]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:22:39.286 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:39 vm00 ceph-mon[47364]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:22:39.286 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:39 vm00 ceph-mon[47364]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm08", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:22:39.286 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:39 vm00 ceph-mon[47364]: from='mgr.24824 ' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm08", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:22:39.286 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:39 vm00 ceph-mon[47364]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:22:39.286 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:39 vm00 ceph-mon[47364]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:22:39.286 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:39 vm00 ceph-mon[47364]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:22:39.286 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:39 vm00 ceph-mon[47364]: from='mgr.24824 ' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:22:39.286 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:39 vm00 ceph-mon[47364]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:22:39.286 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:39 vm00 ceph-mon[47364]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:22:39.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:39 vm08 ceph-mon[49535]: [10/Mar/2026:13:22:37] ENGINE Bus STARTING 2026-03-10T13:22:39.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:39 vm08 ceph-mon[49535]: pgmap v4: 161 pgs: 161 active+clean; 457 KiB data, 99 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:22:39.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:39 vm08 ceph-mon[49535]: 
[10/Mar/2026:13:22:37] ENGINE Serving on http://192.168.123.108:8765 2026-03-10T13:22:39.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:39 vm08 ceph-mon[49535]: [10/Mar/2026:13:22:38] ENGINE Serving on https://192.168.123.108:7150 2026-03-10T13:22:39.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:39 vm08 ceph-mon[49535]: [10/Mar/2026:13:22:38] ENGINE Bus STARTED 2026-03-10T13:22:39.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:39 vm08 ceph-mon[49535]: [10/Mar/2026:13:22:38] ENGINE Client ('192.168.123.108', 43986) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-10T13:22:39.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:39 vm08 ceph-mon[49535]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:22:39.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:39 vm08 ceph-mon[49535]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:22:39.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:39 vm08 ceph-mon[49535]: from='client.24848 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:22:39.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:39 vm08 ceph-mon[49535]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:22:39.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:39 vm08 ceph-mon[49535]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:22:39.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:39 vm08 ceph-mon[49535]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:22:39.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:39 vm08 ceph-mon[49535]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:22:39.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:39 vm08 ceph-mon[49535]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm08", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:22:39.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:39 vm08 ceph-mon[49535]: from='mgr.24824 ' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm08", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:22:39.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:39 vm08 ceph-mon[49535]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:22:39.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:39 vm08 ceph-mon[49535]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:22:39.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:39 vm08 ceph-mon[49535]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:22:39.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:39 vm08 ceph-mon[49535]: from='mgr.24824 ' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:22:39.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:39 vm08 ceph-mon[49535]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:22:39.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:39 vm08 ceph-mon[49535]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:22:39.694 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:22:39 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:22:39.281+0000 
7ff1cfcf2140 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 2026-03-10T13:22:39.694 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:22:39 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:22:39.377+0000 7ff1cfcf2140 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member 2026-03-10T13:22:39.694 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:22:39 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:22:39.512+0000 7ff1cfcf2140 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member 2026-03-10T13:22:39.694 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:22:39 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:22:39.691+0000 7ff1cfcf2140 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-10T13:22:39.953 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:22:39 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:22:39.740+0000 7ff1cfcf2140 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-10T13:22:39.953 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:22:39 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: [10/Mar/2026:13:22:39] ENGINE Bus STARTING 2026-03-10T13:22:39.953 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:22:39 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: CherryPy Checker: 2026-03-10T13:22:39.953 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:22:39 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: The Application mounted at '' has an empty config. 2026-03-10T13:22:39.953 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:22:39 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:22:39.953 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:22:39 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: [10/Mar/2026:13:22:39] ENGINE Serving on http://:::9283 2026-03-10T13:22:39.953 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:22:39 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: [10/Mar/2026:13:22:39] ENGINE Bus STARTED 2026-03-10T13:22:40.206 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:40 vm00 ceph-mon[47364]: Updating vm00:/etc/ceph/ceph.conf 2026-03-10T13:22:40.206 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:40 vm00 ceph-mon[47364]: Updating vm08:/etc/ceph/ceph.conf 2026-03-10T13:22:40.206 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:40 vm00 ceph-mon[47364]: Updating vm08:/var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/config/ceph.conf 2026-03-10T13:22:40.206 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:40 vm00 ceph-mon[47364]: Updating vm00:/var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/config/ceph.conf 2026-03-10T13:22:40.206 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:40 vm00 ceph-mon[47364]: Updating vm08:/etc/ceph/ceph.client.admin.keyring 2026-03-10T13:22:40.206 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:40 vm00 ceph-mon[47364]: Updating vm00:/etc/ceph/ceph.client.admin.keyring 2026-03-10T13:22:40.206 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:40 vm00 ceph-mon[47364]: mgrmap e33: x(active, since 3s), standbys: y 2026-03-10T13:22:40.206 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:40 vm00 ceph-mon[47364]: Updating vm08:/var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/config/ceph.client.admin.keyring 2026-03-10T13:22:40.206 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:40 vm00 ceph-mon[47364]: Updating 
vm00:/var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/config/ceph.client.admin.keyring 2026-03-10T13:22:40.206 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:40 vm00 ceph-mon[47364]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:22:40.206 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:40 vm00 ceph-mon[47364]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:22:40.206 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:40 vm00 ceph-mon[47364]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:22:40.206 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:40 vm00 ceph-mon[47364]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:22:40.206 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:40 vm00 ceph-mon[47364]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:22:40.206 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:40 vm00 ceph-mon[47364]: Reconfiguring iscsi.foo.vm00.dezodo (dependencies changed)... 2026-03-10T13:22:40.206 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:40 vm00 ceph-mon[47364]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dezodo", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-10T13:22:40.206 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:40 vm00 ceph-mon[47364]: from='mgr.24824 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dezodo", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-10T13:22:40.206 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:40 vm00 ceph-mon[47364]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:22:40.206 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:40 vm00 ceph-mon[47364]: Reconfiguring daemon iscsi.foo.vm00.dezodo on vm00 2026-03-10T13:22:40.206 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:40 vm00 ceph-mon[47364]: Standby manager daemon y restarted 2026-03-10T13:22:40.206 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:40 vm00 ceph-mon[47364]: Standby manager daemon y started 2026-03-10T13:22:40.206 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:40 vm00 ceph-mon[47364]: from='mgr.? 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/crt"}]: dispatch 2026-03-10T13:22:40.206 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:40 vm00 ceph-mon[47364]: from='mgr.? 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-10T13:22:40.206 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:40 vm00 ceph-mon[47364]: from='mgr.? 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/key"}]: dispatch 2026-03-10T13:22:40.206 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:40 vm00 ceph-mon[47364]: from='mgr.? 
192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-10T13:22:40.211 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:40 vm00 ceph-mon[51670]: Updating vm00:/etc/ceph/ceph.conf 2026-03-10T13:22:40.211 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:40 vm00 ceph-mon[51670]: Updating vm08:/etc/ceph/ceph.conf 2026-03-10T13:22:40.211 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:40 vm00 ceph-mon[51670]: Updating vm08:/var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/config/ceph.conf 2026-03-10T13:22:40.212 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:40 vm00 ceph-mon[51670]: Updating vm00:/var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/config/ceph.conf 2026-03-10T13:22:40.212 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:40 vm00 ceph-mon[51670]: Updating vm08:/etc/ceph/ceph.client.admin.keyring 2026-03-10T13:22:40.212 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:40 vm00 ceph-mon[51670]: Updating vm00:/etc/ceph/ceph.client.admin.keyring 2026-03-10T13:22:40.212 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:40 vm00 ceph-mon[51670]: mgrmap e33: x(active, since 3s), standbys: y 2026-03-10T13:22:40.212 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:40 vm00 ceph-mon[51670]: Updating vm08:/var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/config/ceph.client.admin.keyring 2026-03-10T13:22:40.212 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:40 vm00 ceph-mon[51670]: Updating vm00:/var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/config/ceph.client.admin.keyring 2026-03-10T13:22:40.212 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:40 vm00 ceph-mon[51670]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:22:40.212 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:40 vm00 ceph-mon[51670]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:22:40.212 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:40 vm00 ceph-mon[51670]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:22:40.212 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:40 vm00 ceph-mon[51670]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:22:40.212 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:40 vm00 ceph-mon[51670]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:22:40.212 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:40 vm00 ceph-mon[51670]: Reconfiguring iscsi.foo.vm00.dezodo (dependencies changed)... 
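The records above show the test driving a manager failover with 'ceph mgr fail' and then waiting a fixed 'sleep 180' while mgr.x becomes active and cephadm reconfigures dependent daemons. A minimal sketch, not part of the executed test yaml, of an event-driven wait instead of the fixed sleep; it assumes the mgr map JSON exposes an active_name field alongside the standbys list already queried with 'ceph mgr dump -f json' elsewhere in this run:

    # Sketch only (hypothetical alternative to "sleep 180"): poll until a new
    # active mgr is reported after "ceph mgr fail".
    prev_active=$(ceph mgr dump -f json | jq -r .active_name)
    ceph mgr fail
    for i in $(seq 1 36); do
        cur=$(ceph mgr dump -f json | jq -r .active_name)
        if [ -n "$cur" ] && [ "$cur" != "$prev_active" ]; then
            break
        fi
        sleep 5
    done
    ceph orch ps --refresh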
2026-03-10T13:22:40.212 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:40 vm00 ceph-mon[51670]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dezodo", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-10T13:22:40.212 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:40 vm00 ceph-mon[51670]: from='mgr.24824 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dezodo", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-10T13:22:40.212 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:40 vm00 ceph-mon[51670]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:22:40.212 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:40 vm00 ceph-mon[51670]: Reconfiguring daemon iscsi.foo.vm00.dezodo on vm00 2026-03-10T13:22:40.212 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:40 vm00 ceph-mon[51670]: Standby manager daemon y restarted 2026-03-10T13:22:40.212 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:40 vm00 ceph-mon[51670]: Standby manager daemon y started 2026-03-10T13:22:40.212 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:40 vm00 ceph-mon[51670]: from='mgr.? 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/crt"}]: dispatch 2026-03-10T13:22:40.212 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:40 vm00 ceph-mon[51670]: from='mgr.? 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-10T13:22:40.212 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:40 vm00 ceph-mon[51670]: from='mgr.? 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/key"}]: dispatch 2026-03-10T13:22:40.212 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:40 vm00 ceph-mon[51670]: from='mgr.? 
192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-10T13:22:40.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:40 vm08 ceph-mon[49535]: Updating vm00:/etc/ceph/ceph.conf 2026-03-10T13:22:40.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:40 vm08 ceph-mon[49535]: Updating vm08:/etc/ceph/ceph.conf 2026-03-10T13:22:40.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:40 vm08 ceph-mon[49535]: Updating vm08:/var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/config/ceph.conf 2026-03-10T13:22:40.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:40 vm08 ceph-mon[49535]: Updating vm00:/var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/config/ceph.conf 2026-03-10T13:22:40.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:40 vm08 ceph-mon[49535]: Updating vm08:/etc/ceph/ceph.client.admin.keyring 2026-03-10T13:22:40.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:40 vm08 ceph-mon[49535]: Updating vm00:/etc/ceph/ceph.client.admin.keyring 2026-03-10T13:22:40.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:40 vm08 ceph-mon[49535]: mgrmap e33: x(active, since 3s), standbys: y 2026-03-10T13:22:40.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:40 vm08 ceph-mon[49535]: Updating vm08:/var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/config/ceph.client.admin.keyring 2026-03-10T13:22:40.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:40 vm08 ceph-mon[49535]: Updating vm00:/var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/config/ceph.client.admin.keyring 2026-03-10T13:22:40.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:40 vm08 ceph-mon[49535]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:22:40.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:40 vm08 ceph-mon[49535]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:22:40.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:40 vm08 ceph-mon[49535]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:22:40.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:40 vm08 ceph-mon[49535]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:22:40.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:40 vm08 ceph-mon[49535]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:22:40.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:40 vm08 ceph-mon[49535]: Reconfiguring iscsi.foo.vm00.dezodo (dependencies changed)... 
2026-03-10T13:22:40.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:40 vm08 ceph-mon[49535]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dezodo", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-10T13:22:40.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:40 vm08 ceph-mon[49535]: from='mgr.24824 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dezodo", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-10T13:22:40.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:40 vm08 ceph-mon[49535]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:22:40.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:40 vm08 ceph-mon[49535]: Reconfiguring daemon iscsi.foo.vm00.dezodo on vm00 2026-03-10T13:22:40.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:40 vm08 ceph-mon[49535]: Standby manager daemon y restarted 2026-03-10T13:22:40.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:40 vm08 ceph-mon[49535]: Standby manager daemon y started 2026-03-10T13:22:40.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:40 vm08 ceph-mon[49535]: from='mgr.? 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/crt"}]: dispatch 2026-03-10T13:22:40.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:40 vm08 ceph-mon[49535]: from='mgr.? 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-10T13:22:40.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:40 vm08 ceph-mon[49535]: from='mgr.? 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/key"}]: dispatch 2026-03-10T13:22:40.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:40 vm08 ceph-mon[49535]: from='mgr.? 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-10T13:22:41.017 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:22:40 vm08 systemd[1]: Stopping Ceph prometheus.a for 98a3dada-1c81-11f1-89c9-d57c120f78d5... 2026-03-10T13:22:41.268 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:22:41 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[75012]: ts=2026-03-10T13:22:41.015Z caller=main.go:964 level=warn msg="Received SIGTERM, exiting gracefully..." 2026-03-10T13:22:41.268 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:22:41 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[75012]: ts=2026-03-10T13:22:41.015Z caller=main.go:988 level=info msg="Stopping scrape discovery manager..." 2026-03-10T13:22:41.268 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:22:41 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[75012]: ts=2026-03-10T13:22:41.015Z caller=main.go:1002 level=info msg="Stopping notify discovery manager..." 
2026-03-10T13:22:41.268 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:22:41 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[75012]: ts=2026-03-10T13:22:41.015Z caller=manager.go:177 level=info component="rule manager" msg="Stopping rule manager..." 2026-03-10T13:22:41.268 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:22:41 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[75012]: ts=2026-03-10T13:22:41.015Z caller=manager.go:187 level=info component="rule manager" msg="Rule manager stopped" 2026-03-10T13:22:41.268 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:22:41 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[75012]: ts=2026-03-10T13:22:41.016Z caller=main.go:1039 level=info msg="Stopping scrape manager..." 2026-03-10T13:22:41.268 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:22:41 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[75012]: ts=2026-03-10T13:22:41.016Z caller=main.go:984 level=info msg="Scrape discovery manager stopped" 2026-03-10T13:22:41.268 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:22:41 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[75012]: ts=2026-03-10T13:22:41.016Z caller=main.go:998 level=info msg="Notify discovery manager stopped" 2026-03-10T13:22:41.268 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:22:41 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[75012]: ts=2026-03-10T13:22:41.019Z caller=main.go:1031 level=info msg="Scrape manager stopped" 2026-03-10T13:22:41.268 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:22:41 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[75012]: ts=2026-03-10T13:22:41.020Z caller=notifier.go:618 level=info component=notifier msg="Stopping notification manager..." 2026-03-10T13:22:41.268 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:22:41 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[75012]: ts=2026-03-10T13:22:41.020Z caller=main.go:1261 level=info msg="Notifier manager stopped" 2026-03-10T13:22:41.268 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:22:41 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[75012]: ts=2026-03-10T13:22:41.020Z caller=main.go:1273 level=info msg="See you next time!" 2026-03-10T13:22:41.268 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:22:41 vm08 podman[77145]: 2026-03-10 13:22:41.031230716 +0000 UTC m=+0.031955266 container died 3b07f384b4b6a851f9f8fc9ff353eacda99d87677443222714b01813a683dcb2 (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-10T13:22:41.268 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:22:41 vm08 podman[77145]: 2026-03-10 13:22:41.050448734 +0000 UTC m=+0.051173284 container remove 3b07f384b4b6a851f9f8fc9ff353eacda99d87677443222714b01813a683dcb2 (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-10T13:22:41.268 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:22:41 vm08 bash[77145]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a 2026-03-10T13:22:41.268 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:22:41 vm08 systemd[1]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@prometheus.a.service: Deactivated successfully. 
2026-03-10T13:22:41.268 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:22:41 vm08 systemd[1]: Stopped Ceph prometheus.a for 98a3dada-1c81-11f1-89c9-d57c120f78d5. 2026-03-10T13:22:41.268 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:22:41 vm08 systemd[1]: Starting Ceph prometheus.a for 98a3dada-1c81-11f1-89c9-d57c120f78d5... 2026-03-10T13:22:41.268 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:22:41 vm08 podman[77212]: 2026-03-10 13:22:41.239063895 +0000 UTC m=+0.024149581 container create e1b806e63eed211791fd17781451bf1fa807254309561224c93f358a9780a180 (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-10T13:22:41.683 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:41 vm00 ceph-mon[51670]: pgmap v5: 161 pgs: 161 active+clean; 457 KiB data, 99 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:22:41.683 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:41 vm00 ceph-mon[51670]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:22:41.683 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:41 vm00 ceph-mon[51670]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:22:41.683 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:41 vm00 ceph-mon[51670]: Reconfiguring prometheus.a (dependencies changed)... 2026-03-10T13:22:41.683 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:41 vm00 ceph-mon[51670]: mgrmap e34: x(active, since 5s), standbys: y 2026-03-10T13:22:41.683 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:41 vm00 ceph-mon[51670]: Reconfiguring daemon prometheus.a on vm08 2026-03-10T13:22:41.683 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:41 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/3354548326' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-10T13:22:41.683 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:41 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/1813262855' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2438902687"}]: dispatch 2026-03-10T13:22:41.683 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:41 vm00 ceph-mon[51670]: from='client.? 
' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2438902687"}]: dispatch 2026-03-10T13:22:41.683 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:41 vm00 ceph-mon[51670]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:22:41.683 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:41 vm00 ceph-mon[51670]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:22:41.683 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:41 vm00 ceph-mon[51670]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-10T13:22:41.683 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:41 vm00 ceph-mon[51670]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-10T13:22:41.683 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:41 vm00 ceph-mon[51670]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-10T13:22:41.683 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:41 vm00 ceph-mon[51670]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://host.containers.internal:9095"}]: dispatch 2026-03-10T13:22:41.683 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:41 vm00 ceph-mon[51670]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:22:41.687 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:41 vm00 ceph-mon[47364]: pgmap v5: 161 pgs: 161 active+clean; 457 KiB data, 99 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:22:41.687 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:41 vm00 ceph-mon[47364]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:22:41.687 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:41 vm00 ceph-mon[47364]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:22:41.687 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:41 vm00 ceph-mon[47364]: Reconfiguring prometheus.a (dependencies changed)... 2026-03-10T13:22:41.687 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:41 vm00 ceph-mon[47364]: mgrmap e34: x(active, since 5s), standbys: y 2026-03-10T13:22:41.687 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:41 vm00 ceph-mon[47364]: Reconfiguring daemon prometheus.a on vm08 2026-03-10T13:22:41.687 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:41 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/3354548326' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-10T13:22:41.687 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:41 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/1813262855' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2438902687"}]: dispatch 2026-03-10T13:22:41.687 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:41 vm00 ceph-mon[47364]: from='client.? 
' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2438902687"}]: dispatch 2026-03-10T13:22:41.687 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:41 vm00 ceph-mon[47364]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:22:41.687 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:41 vm00 ceph-mon[47364]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:22:41.687 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:41 vm00 ceph-mon[47364]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-10T13:22:41.687 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:41 vm00 ceph-mon[47364]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-10T13:22:41.688 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:41 vm00 ceph-mon[47364]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-10T13:22:41.688 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:41 vm00 ceph-mon[47364]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://host.containers.internal:9095"}]: dispatch 2026-03-10T13:22:41.688 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:41 vm00 ceph-mon[47364]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:22:41.706 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:41 vm08 ceph-mon[49535]: pgmap v5: 161 pgs: 161 active+clean; 457 KiB data, 99 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:22:41.706 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:41 vm08 ceph-mon[49535]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:22:41.706 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:41 vm08 ceph-mon[49535]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:22:41.707 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:41 vm08 ceph-mon[49535]: Reconfiguring prometheus.a (dependencies changed)... 2026-03-10T13:22:41.707 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:41 vm08 ceph-mon[49535]: mgrmap e34: x(active, since 5s), standbys: y 2026-03-10T13:22:41.707 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:41 vm08 ceph-mon[49535]: Reconfiguring daemon prometheus.a on vm08 2026-03-10T13:22:41.707 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:41 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/3354548326' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-10T13:22:41.707 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:41 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/1813262855' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2438902687"}]: dispatch 2026-03-10T13:22:41.707 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:41 vm08 ceph-mon[49535]: from='client.? 
' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2438902687"}]: dispatch 2026-03-10T13:22:41.707 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:41 vm08 ceph-mon[49535]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:22:41.707 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:41 vm08 ceph-mon[49535]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:22:41.707 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:41 vm08 ceph-mon[49535]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-10T13:22:41.707 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:41 vm08 ceph-mon[49535]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-10T13:22:41.707 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:41 vm08 ceph-mon[49535]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-10T13:22:41.707 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:41 vm08 ceph-mon[49535]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://host.containers.internal:9095"}]: dispatch 2026-03-10T13:22:41.707 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:41 vm08 ceph-mon[49535]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:22:41.707 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:22:41 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: [10/Mar/2026:13:22:41] ENGINE Bus STOPPING 2026-03-10T13:22:41.707 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:22:41 vm08 podman[77212]: 2026-03-10 13:22:41.270518333 +0000 UTC m=+0.055604019 container init e1b806e63eed211791fd17781451bf1fa807254309561224c93f358a9780a180 (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-10T13:22:41.707 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:22:41 vm08 podman[77212]: 2026-03-10 13:22:41.273093262 +0000 UTC m=+0.058178948 container start e1b806e63eed211791fd17781451bf1fa807254309561224c93f358a9780a180 (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-10T13:22:41.707 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:22:41 vm08 bash[77212]: e1b806e63eed211791fd17781451bf1fa807254309561224c93f358a9780a180 2026-03-10T13:22:41.707 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:22:41 vm08 podman[77212]: 2026-03-10 13:22:41.227877383 +0000 UTC m=+0.012963078 image pull 1d3b7f56885b6dd623f1785be963aa9c195f86bc256ea454e8d02a7980b79c53 quay.io/prometheus/prometheus:v2.51.0 2026-03-10T13:22:41.707 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:22:41 vm08 systemd[1]: Started Ceph prometheus.a for 98a3dada-1c81-11f1-89c9-d57c120f78d5. 
2026-03-10T13:22:41.707 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:22:41 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[77222]: ts=2026-03-10T13:22:41.309Z caller=main.go:617 level=info msg="Starting Prometheus Server" mode=server version="(version=2.51.0, branch=HEAD, revision=c05c15512acb675e3f6cd662a6727854e93fc024)" 2026-03-10T13:22:41.707 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:22:41 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[77222]: ts=2026-03-10T13:22:41.309Z caller=main.go:622 level=info build_context="(go=go1.22.1, platform=linux/amd64, user=root@b5723e458358, date=20240319-10:54:45, tags=netgo,builtinassets,stringlabels)" 2026-03-10T13:22:41.707 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:22:41 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[77222]: ts=2026-03-10T13:22:41.309Z caller=main.go:623 level=info host_details="(Linux 5.14.0-686.el9.x86_64 #1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026 x86_64 vm08 (none))" 2026-03-10T13:22:41.707 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:22:41 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[77222]: ts=2026-03-10T13:22:41.309Z caller=main.go:624 level=info fd_limits="(soft=1048576, hard=1048576)" 2026-03-10T13:22:41.707 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:22:41 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[77222]: ts=2026-03-10T13:22:41.309Z caller=main.go:625 level=info vm_limits="(soft=unlimited, hard=unlimited)" 2026-03-10T13:22:41.707 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:22:41 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[77222]: ts=2026-03-10T13:22:41.316Z caller=web.go:568 level=info component=web msg="Start listening for connections" address=:9095 2026-03-10T13:22:41.707 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:22:41 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[77222]: ts=2026-03-10T13:22:41.316Z caller=main.go:1129 level=info msg="Starting TSDB ..." 2026-03-10T13:22:41.708 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:22:41 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[77222]: ts=2026-03-10T13:22:41.319Z caller=tls_config.go:313 level=info component=web msg="Listening on" address=[::]:9095 2026-03-10T13:22:41.708 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:22:41 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[77222]: ts=2026-03-10T13:22:41.319Z caller=tls_config.go:316 level=info component=web msg="TLS is disabled." 
http2=false address=[::]:9095 2026-03-10T13:22:41.708 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:22:41 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[77222]: ts=2026-03-10T13:22:41.320Z caller=head.go:616 level=info component=tsdb msg="Replaying on-disk memory mappable chunks if any" 2026-03-10T13:22:41.708 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:22:41 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[77222]: ts=2026-03-10T13:22:41.320Z caller=head.go:698 level=info component=tsdb msg="On-disk memory mappable chunks replay completed" duration=1.583µs 2026-03-10T13:22:41.708 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:22:41 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[77222]: ts=2026-03-10T13:22:41.320Z caller=head.go:706 level=info component=tsdb msg="Replaying WAL, this may take a while" 2026-03-10T13:22:41.708 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:22:41 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[77222]: ts=2026-03-10T13:22:41.330Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=0 maxSegment=4 2026-03-10T13:22:41.708 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:22:41 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[77222]: ts=2026-03-10T13:22:41.352Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=1 maxSegment=4 2026-03-10T13:22:41.708 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:22:41 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[77222]: ts=2026-03-10T13:22:41.368Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=2 maxSegment=4 2026-03-10T13:22:41.708 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:22:41 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[77222]: ts=2026-03-10T13:22:41.372Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=3 maxSegment=4 2026-03-10T13:22:41.708 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:22:41 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[77222]: ts=2026-03-10T13:22:41.372Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=4 maxSegment=4 2026-03-10T13:22:41.708 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:22:41 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[77222]: ts=2026-03-10T13:22:41.372Z caller=head.go:815 level=info component=tsdb msg="WAL replay completed" checkpoint_replay_duration=19.928µs wal_replay_duration=52.038832ms wbl_replay_duration=150ns total_replay_duration=52.09633ms 2026-03-10T13:22:41.708 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:22:41 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[77222]: ts=2026-03-10T13:22:41.378Z caller=main.go:1150 level=info fs_type=XFS_SUPER_MAGIC 2026-03-10T13:22:41.708 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:22:41 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[77222]: ts=2026-03-10T13:22:41.378Z caller=main.go:1153 level=info msg="TSDB started" 2026-03-10T13:22:41.708 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:22:41 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[77222]: ts=2026-03-10T13:22:41.378Z caller=main.go:1335 level=info msg="Loading configuration file" filename=/etc/prometheus/prometheus.yml 2026-03-10T13:22:41.708 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:22:41 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[77222]: 
ts=2026-03-10T13:22:41.394Z caller=main.go:1372 level=info msg="Completed loading of configuration file" filename=/etc/prometheus/prometheus.yml totalDuration=15.829665ms db_storage=801ns remote_storage=1.273µs web_handler=441ns query_engine=942ns scrape=858.577µs scrape_sd=192.8µs notify=8.256µs notify_sd=9.388µs rules=14.456775ms tracing=6.572µs 2026-03-10T13:22:41.708 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:22:41 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[77222]: ts=2026-03-10T13:22:41.394Z caller=main.go:1114 level=info msg="Server is ready to receive web requests." 2026-03-10T13:22:41.708 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:22:41 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[77222]: ts=2026-03-10T13:22:41.394Z caller=manager.go:163 level=info component="rule manager" msg="Starting rule manager..." 2026-03-10T13:22:41.967 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:22:41 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: [10/Mar/2026:13:22:41] ENGINE HTTP Server cherrypy._cpwsgi_server.CPWSGIServer(('::', 9283)) shut down 2026-03-10T13:22:41.967 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:22:41 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: [10/Mar/2026:13:22:41] ENGINE Bus STOPPED 2026-03-10T13:22:41.967 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:22:41 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: [10/Mar/2026:13:22:41] ENGINE Bus STARTING 2026-03-10T13:22:41.967 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:22:41 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: [10/Mar/2026:13:22:41] ENGINE Serving on http://:::9283 2026-03-10T13:22:41.967 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:22:41 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: [10/Mar/2026:13:22:41] ENGINE Bus STARTED 2026-03-10T13:22:42.514 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:42 vm00 ceph-mon[47364]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-10T13:22:42.517 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:42 vm08 ceph-mon[49535]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-10T13:22:42.517 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:42 vm08 ceph-mon[49535]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-10T13:22:42.517 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:42 vm08 ceph-mon[49535]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-10T13:22:42.517 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:42 vm08 ceph-mon[49535]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://host.containers.internal:9095"}]: dispatch 2026-03-10T13:22:42.517 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:42 vm08 ceph-mon[49535]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2438902687"}]': finished 2026-03-10T13:22:42.517 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:42 vm08 ceph-mon[49535]: osdmap e88: 8 total, 8 up, 8 in 2026-03-10T13:22:42.518 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:42 vm08 ceph-mon[49535]: from='client.? 
192.168.123.100:0/3022094254' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2138124590"}]: dispatch 2026-03-10T13:22:42.518 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:42 vm08 ceph-mon[49535]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2138124590"}]: dispatch 2026-03-10T13:22:42.820 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:42 vm00 ceph-mon[51670]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-10T13:22:42.820 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:42 vm00 ceph-mon[51670]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-10T13:22:42.820 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:42 vm00 ceph-mon[51670]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-10T13:22:42.820 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:42 vm00 ceph-mon[51670]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://host.containers.internal:9095"}]: dispatch 2026-03-10T13:22:42.820 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:42 vm00 ceph-mon[51670]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2438902687"}]': finished 2026-03-10T13:22:42.820 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:42 vm00 ceph-mon[51670]: osdmap e88: 8 total, 8 up, 8 in 2026-03-10T13:22:42.820 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:42 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/3022094254' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2138124590"}]: dispatch 2026-03-10T13:22:42.820 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:42 vm00 ceph-mon[51670]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2138124590"}]: dispatch 2026-03-10T13:22:42.821 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:42 vm00 ceph-mon[47364]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-10T13:22:42.821 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:42 vm00 ceph-mon[47364]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-10T13:22:42.821 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:42 vm00 ceph-mon[47364]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://host.containers.internal:9095"}]: dispatch 2026-03-10T13:22:42.821 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:42 vm00 ceph-mon[47364]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2438902687"}]': finished 2026-03-10T13:22:42.821 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:42 vm00 ceph-mon[47364]: osdmap e88: 8 total, 8 up, 8 in 2026-03-10T13:22:42.821 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:42 vm00 ceph-mon[47364]: from='client.? 
192.168.123.100:0/3022094254' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2138124590"}]: dispatch 2026-03-10T13:22:42.821 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:42 vm00 ceph-mon[47364]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2138124590"}]: dispatch 2026-03-10T13:22:44.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:43 vm00 ceph-mon[47364]: pgmap v7: 161 pgs: 161 active+clean; 457 KiB data, 99 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:22:44.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:43 vm00 ceph-mon[47364]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2138124590"}]': finished 2026-03-10T13:22:44.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:43 vm00 ceph-mon[47364]: osdmap e89: 8 total, 8 up, 8 in 2026-03-10T13:22:44.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:43 vm00 ceph-mon[47364]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:22:44.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:43 vm00 ceph-mon[47364]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:22:44.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:43 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/2602340889' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/242732414"}]: dispatch 2026-03-10T13:22:44.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:43 vm00 ceph-mon[47364]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:22:44.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:43 vm00 ceph-mon[47364]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:22:44.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:43 vm00 ceph-mon[47364]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:22:44.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:43 vm00 ceph-mon[47364]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:22:44.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:43 vm00 ceph-mon[47364]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:22:44.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:43 vm00 ceph-mon[51670]: pgmap v7: 161 pgs: 161 active+clean; 457 KiB data, 99 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:22:44.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:43 vm00 ceph-mon[51670]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2138124590"}]': finished 2026-03-10T13:22:44.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:43 vm00 ceph-mon[51670]: osdmap e89: 8 total, 8 up, 8 in 2026-03-10T13:22:44.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:43 vm00 ceph-mon[51670]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:22:44.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:43 vm00 ceph-mon[51670]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:22:44.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:43 vm00 ceph-mon[51670]: from='client.? 
192.168.123.100:0/2602340889' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/242732414"}]: dispatch 2026-03-10T13:22:44.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:43 vm00 ceph-mon[51670]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:22:44.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:43 vm00 ceph-mon[51670]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:22:44.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:43 vm00 ceph-mon[51670]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:22:44.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:43 vm00 ceph-mon[51670]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:22:44.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:43 vm00 ceph-mon[51670]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:22:44.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:43 vm08 ceph-mon[49535]: pgmap v7: 161 pgs: 161 active+clean; 457 KiB data, 99 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:22:44.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:43 vm08 ceph-mon[49535]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2138124590"}]': finished 2026-03-10T13:22:44.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:43 vm08 ceph-mon[49535]: osdmap e89: 8 total, 8 up, 8 in 2026-03-10T13:22:44.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:43 vm08 ceph-mon[49535]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:22:44.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:43 vm08 ceph-mon[49535]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:22:44.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:43 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/2602340889' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/242732414"}]: dispatch 2026-03-10T13:22:44.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:43 vm08 ceph-mon[49535]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:22:44.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:43 vm08 ceph-mon[49535]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:22:44.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:43 vm08 ceph-mon[49535]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:22:44.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:43 vm08 ceph-mon[49535]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:22:44.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:43 vm08 ceph-mon[49535]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:22:45.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:44 vm00 ceph-mon[47364]: from='client.? 
192.168.123.100:0/2602340889' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/242732414"}]': finished 2026-03-10T13:22:45.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:44 vm00 ceph-mon[47364]: osdmap e90: 8 total, 8 up, 8 in 2026-03-10T13:22:45.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:44 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/2276290048' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1770265346"}]: dispatch 2026-03-10T13:22:45.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:44 vm00 ceph-mon[47364]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1770265346"}]: dispatch 2026-03-10T13:22:45.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:44 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/2602340889' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/242732414"}]': finished 2026-03-10T13:22:45.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:44 vm00 ceph-mon[51670]: osdmap e90: 8 total, 8 up, 8 in 2026-03-10T13:22:45.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:44 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/2276290048' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1770265346"}]: dispatch 2026-03-10T13:22:45.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:44 vm00 ceph-mon[51670]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1770265346"}]: dispatch 2026-03-10T13:22:45.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:44 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/2602340889' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/242732414"}]': finished 2026-03-10T13:22:45.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:44 vm08 ceph-mon[49535]: osdmap e90: 8 total, 8 up, 8 in 2026-03-10T13:22:45.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:44 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/2276290048' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1770265346"}]: dispatch 2026-03-10T13:22:45.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:44 vm08 ceph-mon[49535]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1770265346"}]: dispatch 2026-03-10T13:22:46.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:45 vm00 ceph-mon[47364]: pgmap v10: 161 pgs: 161 active+clean; 457 KiB data, 99 MiB used, 160 GiB / 160 GiB avail; 30 KiB/s rd, 0 B/s wr, 13 op/s 2026-03-10T13:22:46.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:45 vm00 ceph-mon[47364]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1770265346"}]': finished 2026-03-10T13:22:46.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:45 vm00 ceph-mon[47364]: osdmap e91: 8 total, 8 up, 8 in 2026-03-10T13:22:46.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:45 vm00 ceph-mon[47364]: from='client.? 
192.168.123.100:0/2553493968' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/242732414"}]: dispatch 2026-03-10T13:22:46.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:45 vm00 ceph-mon[51670]: pgmap v10: 161 pgs: 161 active+clean; 457 KiB data, 99 MiB used, 160 GiB / 160 GiB avail; 30 KiB/s rd, 0 B/s wr, 13 op/s 2026-03-10T13:22:46.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:45 vm00 ceph-mon[51670]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1770265346"}]': finished 2026-03-10T13:22:46.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:45 vm00 ceph-mon[51670]: osdmap e91: 8 total, 8 up, 8 in 2026-03-10T13:22:46.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:45 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/2553493968' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/242732414"}]: dispatch 2026-03-10T13:22:46.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:45 vm08 ceph-mon[49535]: pgmap v10: 161 pgs: 161 active+clean; 457 KiB data, 99 MiB used, 160 GiB / 160 GiB avail; 30 KiB/s rd, 0 B/s wr, 13 op/s 2026-03-10T13:22:46.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:45 vm08 ceph-mon[49535]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1770265346"}]': finished 2026-03-10T13:22:46.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:45 vm08 ceph-mon[49535]: osdmap e91: 8 total, 8 up, 8 in 2026-03-10T13:22:46.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:45 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/2553493968' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/242732414"}]: dispatch 2026-03-10T13:22:47.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:46 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/2553493968' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/242732414"}]': finished 2026-03-10T13:22:47.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:46 vm00 ceph-mon[47364]: osdmap e92: 8 total, 8 up, 8 in 2026-03-10T13:22:47.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:46 vm00 ceph-mon[47364]: pgmap v13: 161 pgs: 161 active+clean; 457 KiB data, 99 MiB used, 160 GiB / 160 GiB avail; 45 KiB/s rd, 0 B/s wr, 19 op/s 2026-03-10T13:22:47.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:46 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/4191837259' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2806883453"}]: dispatch 2026-03-10T13:22:47.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:46 vm00 ceph-mon[47364]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2806883453"}]: dispatch 2026-03-10T13:22:47.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:46 vm00 ceph-mon[51670]: from='client.? 
192.168.123.100:0/2553493968' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/242732414"}]': finished 2026-03-10T13:22:47.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:46 vm00 ceph-mon[51670]: osdmap e92: 8 total, 8 up, 8 in 2026-03-10T13:22:47.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:46 vm00 ceph-mon[51670]: pgmap v13: 161 pgs: 161 active+clean; 457 KiB data, 99 MiB used, 160 GiB / 160 GiB avail; 45 KiB/s rd, 0 B/s wr, 19 op/s 2026-03-10T13:22:47.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:46 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/4191837259' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2806883453"}]: dispatch 2026-03-10T13:22:47.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:46 vm00 ceph-mon[51670]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2806883453"}]: dispatch 2026-03-10T13:22:47.253 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:22:46 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:22:46.970Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:22:47.253 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:22:46 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:22:46.971Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:22:47.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:46 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/2553493968' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/242732414"}]': finished 2026-03-10T13:22:47.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:46 vm08 ceph-mon[49535]: osdmap e92: 8 total, 8 up, 8 in 2026-03-10T13:22:47.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:46 vm08 ceph-mon[49535]: pgmap v13: 161 pgs: 161 active+clean; 457 KiB data, 99 MiB used, 160 GiB / 160 GiB avail; 45 KiB/s rd, 0 B/s wr, 19 op/s 2026-03-10T13:22:47.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:46 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/4191837259' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2806883453"}]: dispatch 2026-03-10T13:22:47.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:46 vm08 ceph-mon[49535]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2806883453"}]: dispatch 2026-03-10T13:22:48.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:47 vm00 ceph-mon[47364]: from='client.? 
' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2806883453"}]': finished 2026-03-10T13:22:48.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:47 vm00 ceph-mon[47364]: osdmap e93: 8 total, 8 up, 8 in 2026-03-10T13:22:48.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:47 vm00 ceph-mon[51670]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2806883453"}]': finished 2026-03-10T13:22:48.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:47 vm00 ceph-mon[51670]: osdmap e93: 8 total, 8 up, 8 in 2026-03-10T13:22:48.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:47 vm08 ceph-mon[49535]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2806883453"}]': finished 2026-03-10T13:22:48.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:47 vm08 ceph-mon[49535]: osdmap e93: 8 total, 8 up, 8 in 2026-03-10T13:22:49.520 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:22:49 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:22:49] "GET /metrics HTTP/1.1" 200 34745 "" "Prometheus/2.51.0" 2026-03-10T13:22:50.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:49 vm00 ceph-mon[47364]: pgmap v15: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:22:50.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:49 vm00 ceph-mon[51670]: pgmap v15: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:22:50.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:49 vm08 ceph-mon[49535]: pgmap v15: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:22:52.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:51 vm00 ceph-mon[47364]: pgmap v16: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:22:52.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:51 vm00 ceph-mon[47364]: from='client.24991 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:22:52.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:51 vm00 ceph-mon[47364]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:22:52.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:51 vm00 ceph-mon[51670]: pgmap v16: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:22:52.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:51 vm00 ceph-mon[51670]: from='client.24991 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:22:52.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:51 vm00 ceph-mon[51670]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:22:52.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:51 vm08 ceph-mon[49535]: pgmap v16: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:22:52.020 
INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:51 vm08 ceph-mon[49535]: from='client.24991 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:22:52.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:51 vm08 ceph-mon[49535]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:22:54.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:53 vm00 ceph-mon[47364]: pgmap v17: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 715 B/s rd, 0 op/s 2026-03-10T13:22:54.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:53 vm00 ceph-mon[51670]: pgmap v17: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 715 B/s rd, 0 op/s 2026-03-10T13:22:54.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:53 vm08 ceph-mon[49535]: pgmap v17: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 715 B/s rd, 0 op/s 2026-03-10T13:22:56.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:55 vm00 ceph-mon[47364]: pgmap v18: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:22:56.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:55 vm00 ceph-mon[51670]: pgmap v18: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:22:56.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:55 vm08 ceph-mon[49535]: pgmap v18: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:22:57.252 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:22:56 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:22:56.971Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:22:57.253 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:22:56 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:22:56.972Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:22:58.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:57 vm00 ceph-mon[47364]: pgmap v19: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-10T13:22:58.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:57 vm00 ceph-mon[51670]: pgmap v19: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-10T13:22:58.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:57 vm08 ceph-mon[49535]: pgmap v19: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-10T13:22:59.520 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:22:59 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:22:59] "GET /metrics HTTP/1.1" 
200 37549 "" "Prometheus/2.51.0" 2026-03-10T13:23:00.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:22:59 vm00 ceph-mon[47364]: pgmap v20: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.3 KiB/s rd, 1 op/s 2026-03-10T13:23:00.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:22:59 vm00 ceph-mon[51670]: pgmap v20: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.3 KiB/s rd, 1 op/s 2026-03-10T13:23:00.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:22:59 vm08 ceph-mon[49535]: pgmap v20: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.3 KiB/s rd, 1 op/s 2026-03-10T13:23:02.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:23:01 vm00 ceph-mon[47364]: pgmap v21: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:23:02.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:23:01 vm00 ceph-mon[47364]: from='client.24991 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:23:02.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:23:01 vm00 ceph-mon[51670]: pgmap v21: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:23:02.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:23:01 vm00 ceph-mon[51670]: from='client.24991 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:23:02.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:23:01 vm08 ceph-mon[49535]: pgmap v21: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:23:02.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:23:01 vm08 ceph-mon[49535]: from='client.24991 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:23:04.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:23:03 vm00 ceph-mon[47364]: pgmap v22: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:23:04.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:23:03 vm00 ceph-mon[51670]: pgmap v22: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:23:04.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:23:03 vm08 ceph-mon[49535]: pgmap v22: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:23:06.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:23:05 vm08 ceph-mon[49535]: pgmap v23: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:23:06.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:23:05 vm00 ceph-mon[47364]: pgmap v23: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:23:06.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:23:05 vm00 ceph-mon[51670]: pgmap v23: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:23:07.252 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:23:06 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:23:06.972Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 
err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:23:07.252 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:23:06 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:23:06.974Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:23:07.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:23:07 vm00 ceph-mon[47364]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:23:07.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:23:07 vm00 ceph-mon[51670]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:23:07.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:23:07 vm08 ceph-mon[49535]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:23:08.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:23:08 vm00 ceph-mon[47364]: pgmap v24: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:23:08.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:23:08 vm00 ceph-mon[51670]: pgmap v24: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:23:08.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:23:08 vm08 ceph-mon[49535]: pgmap v24: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:23:09.520 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:23:09 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:23:09] "GET /metrics HTTP/1.1" 200 37543 "" "Prometheus/2.51.0" 2026-03-10T13:23:10.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:23:09 vm00 ceph-mon[47364]: pgmap v25: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:23:10.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:23:09 vm00 ceph-mon[51670]: pgmap v25: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:23:10.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:23:09 vm08 ceph-mon[49535]: pgmap v25: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:23:12.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:23:11 vm00 ceph-mon[47364]: pgmap v26: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:23:12.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:23:11 vm00 ceph-mon[47364]: from='client.24991 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:23:12.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:23:11 vm00 ceph-mon[51670]: pgmap v26: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s 
rd, 0 op/s 2026-03-10T13:23:12.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:23:11 vm00 ceph-mon[51670]: from='client.24991 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:23:12.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:23:11 vm08 ceph-mon[49535]: pgmap v26: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:23:12.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:23:11 vm08 ceph-mon[49535]: from='client.24991 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:23:14.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:23:13 vm00 ceph-mon[47364]: pgmap v27: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:23:14.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:23:13 vm00 ceph-mon[51670]: pgmap v27: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:23:14.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:23:13 vm08 ceph-mon[49535]: pgmap v27: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:23:16.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:23:15 vm00 ceph-mon[47364]: pgmap v28: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:23:16.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:23:15 vm00 ceph-mon[51670]: pgmap v28: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:23:16.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:23:15 vm08 ceph-mon[49535]: pgmap v28: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:23:17.252 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:23:16 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:23:16.972Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:23:17.252 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:23:16 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:23:16.974Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:23:18.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:23:17 vm00 ceph-mon[47364]: pgmap v29: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:23:18.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:23:17 vm00 ceph-mon[51670]: pgmap v29: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:23:18.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:23:17 vm08 ceph-mon[49535]: pgmap v29: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB 
avail; 853 B/s rd, 0 op/s 2026-03-10T13:23:19.520 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:23:19 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:23:19] "GET /metrics HTTP/1.1" 200 37543 "" "Prometheus/2.51.0" 2026-03-10T13:23:20.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:23:19 vm00 ceph-mon[47364]: pgmap v30: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:23:20.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:23:19 vm00 ceph-mon[51670]: pgmap v30: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:23:20.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:23:19 vm08 ceph-mon[49535]: pgmap v30: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:23:21.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:23:20 vm00 ceph-mon[47364]: pgmap v31: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:23:21.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:23:20 vm00 ceph-mon[47364]: from='client.24991 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:23:21.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:23:20 vm00 ceph-mon[51670]: pgmap v31: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:23:21.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:23:20 vm00 ceph-mon[51670]: from='client.24991 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:23:21.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:23:20 vm08 ceph-mon[49535]: pgmap v31: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:23:21.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:23:20 vm08 ceph-mon[49535]: from='client.24991 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:23:22.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:23:21 vm00 ceph-mon[47364]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:23:22.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:23:21 vm00 ceph-mon[51670]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:23:22.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:23:21 vm08 ceph-mon[49535]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:23:23.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:23:22 vm00 ceph-mon[47364]: pgmap v32: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:23:23.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:23:22 vm00 ceph-mon[51670]: pgmap v32: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:23:23.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:23:22 vm08 ceph-mon[49535]: pgmap v32: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:23:26.002 
INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:23:25 vm00 ceph-mon[47364]: pgmap v33: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:23:26.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:23:25 vm00 ceph-mon[51670]: pgmap v33: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:23:26.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:23:25 vm08 ceph-mon[49535]: pgmap v33: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:23:27.252 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:23:26 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:23:26.973Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:23:27.252 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:23:26 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:23:26.974Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:23:28.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:23:27 vm00 ceph-mon[47364]: pgmap v34: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:23:28.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:23:27 vm00 ceph-mon[51670]: pgmap v34: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:23:28.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:23:27 vm08 ceph-mon[49535]: pgmap v34: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:23:29.520 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:23:29 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:23:29] "GET /metrics HTTP/1.1" 200 37537 "" "Prometheus/2.51.0" 2026-03-10T13:23:30.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:23:29 vm00 ceph-mon[47364]: pgmap v35: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:23:30.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:23:29 vm00 ceph-mon[51670]: pgmap v35: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:23:30.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:23:29 vm08 ceph-mon[49535]: pgmap v35: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:23:32.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:23:31 vm00 ceph-mon[47364]: pgmap v36: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:23:32.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:23:31 vm00 ceph-mon[47364]: from='client.24991 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": 
"json"}]: dispatch 2026-03-10T13:23:32.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:23:31 vm00 ceph-mon[51670]: pgmap v36: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:23:32.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:23:31 vm00 ceph-mon[51670]: from='client.24991 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:23:32.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:23:31 vm08 ceph-mon[49535]: pgmap v36: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:23:32.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:23:31 vm08 ceph-mon[49535]: from='client.24991 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:23:33.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:23:32 vm00 ceph-mon[47364]: pgmap v37: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:23:33.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:23:33 vm00 ceph-mon[51670]: pgmap v37: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:23:33.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:23:33 vm08 ceph-mon[49535]: pgmap v37: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:23:36.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:23:35 vm00 ceph-mon[47364]: pgmap v38: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:23:36.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:23:35 vm00 ceph-mon[51670]: pgmap v38: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:23:36.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:23:35 vm08 ceph-mon[49535]: pgmap v38: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:23:36.974 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:23:36 vm00 ceph-mon[47364]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:23:36.975 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:23:36 vm00 ceph-mon[51670]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:23:37.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:23:36 vm08 ceph-mon[49535]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:23:37.252 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:23:36 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:23:36.974Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:23:37.252 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:23:36 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:23:36.974Z 
caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:23:38.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:23:37 vm00 ceph-mon[47364]: pgmap v39: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:23:38.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:23:37 vm00 ceph-mon[51670]: pgmap v39: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:23:38.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:23:37 vm08 ceph-mon[49535]: pgmap v39: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:23:39.520 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:23:39 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:23:39] "GET /metrics HTTP/1.1" 200 37541 "" "Prometheus/2.51.0" 2026-03-10T13:23:40.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:23:39 vm00 ceph-mon[47364]: pgmap v40: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:23:40.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:23:39 vm00 ceph-mon[51670]: pgmap v40: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:23:40.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:23:39 vm08 ceph-mon[49535]: pgmap v40: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:23:41.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:23:40 vm00 ceph-mon[47364]: pgmap v41: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:23:41.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:23:40 vm00 ceph-mon[47364]: from='client.24991 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:23:41.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:23:40 vm00 ceph-mon[51670]: pgmap v41: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:23:41.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:23:40 vm00 ceph-mon[51670]: from='client.24991 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:23:41.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:23:40 vm08 ceph-mon[49535]: pgmap v41: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:23:41.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:23:40 vm08 ceph-mon[49535]: from='client.24991 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:23:43.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:23:42 vm00 ceph-mon[47364]: pgmap v42: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:23:43.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:23:42 vm00 ceph-mon[51670]: pgmap v42: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 
op/s 2026-03-10T13:23:43.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:23:42 vm08 ceph-mon[49535]: pgmap v42: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:23:44.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:23:43 vm00 ceph-mon[47364]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:23:44.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:23:43 vm00 ceph-mon[47364]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:23:44.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:23:43 vm00 ceph-mon[47364]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:23:44.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:23:43 vm00 ceph-mon[51670]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:23:44.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:23:43 vm00 ceph-mon[51670]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:23:44.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:23:43 vm00 ceph-mon[51670]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:23:44.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:23:43 vm08 ceph-mon[49535]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:23:44.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:23:43 vm08 ceph-mon[49535]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:23:44.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:23:43 vm08 ceph-mon[49535]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:23:46.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:23:45 vm00 ceph-mon[47364]: pgmap v43: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:23:46.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:23:45 vm00 ceph-mon[51670]: pgmap v43: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:23:46.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:23:45 vm08 ceph-mon[49535]: pgmap v43: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:23:47.253 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:23:46 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:23:46.974Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:23:47.253 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:23:46 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:23:46.975Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup 
host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:23:48.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:23:47 vm00 ceph-mon[47364]: pgmap v44: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:23:48.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:23:47 vm00 ceph-mon[51670]: pgmap v44: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:23:48.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:23:47 vm08 ceph-mon[49535]: pgmap v44: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:23:49.520 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:23:49 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:23:49] "GET /metrics HTTP/1.1" 200 37541 "" "Prometheus/2.51.0" 2026-03-10T13:23:50.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:23:49 vm00 ceph-mon[47364]: pgmap v45: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:23:50.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:23:49 vm00 ceph-mon[51670]: pgmap v45: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:23:50.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:23:49 vm08 ceph-mon[49535]: pgmap v45: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:23:52.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:23:51 vm00 ceph-mon[47364]: pgmap v46: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:23:52.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:23:51 vm00 ceph-mon[47364]: from='client.24991 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:23:52.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:23:51 vm00 ceph-mon[47364]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:23:52.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:23:51 vm00 ceph-mon[51670]: pgmap v46: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:23:52.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:23:51 vm00 ceph-mon[51670]: from='client.24991 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:23:52.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:23:51 vm00 ceph-mon[51670]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:23:52.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:23:51 vm08 ceph-mon[49535]: pgmap v46: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:23:52.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:23:51 vm08 ceph-mon[49535]: from='client.24991 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:23:52.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:23:51 vm08 ceph-mon[49535]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": 
"json"}]: dispatch 2026-03-10T13:23:54.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:23:53 vm00 ceph-mon[47364]: pgmap v47: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:23:54.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:23:53 vm00 ceph-mon[51670]: pgmap v47: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:23:54.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:23:53 vm08 ceph-mon[49535]: pgmap v47: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:23:56.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:23:55 vm00 ceph-mon[47364]: pgmap v48: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:23:56.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:23:55 vm00 ceph-mon[51670]: pgmap v48: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:23:56.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:23:55 vm08 ceph-mon[49535]: pgmap v48: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:23:57.252 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:23:56 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:23:56.975Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:23:57.252 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:23:56 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:23:56.975Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:23:58.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:23:57 vm00 ceph-mon[47364]: pgmap v49: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:23:58.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:23:57 vm00 ceph-mon[51670]: pgmap v49: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:23:58.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:23:57 vm08 ceph-mon[49535]: pgmap v49: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:23:59.520 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:23:59 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:23:59] "GET /metrics HTTP/1.1" 200 37541 "" "Prometheus/2.51.0" 2026-03-10T13:24:00.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:23:59 vm00 ceph-mon[47364]: pgmap v50: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:24:00.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:23:59 vm00 ceph-mon[51670]: pgmap v50: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 
160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:24:00.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:23:59 vm08 ceph-mon[49535]: pgmap v50: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:24:01.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:24:00 vm00 ceph-mon[47364]: pgmap v51: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:24:01.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:24:00 vm00 ceph-mon[47364]: from='client.24991 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:24:01.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:24:00 vm00 ceph-mon[51670]: pgmap v51: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:24:01.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:24:00 vm00 ceph-mon[51670]: from='client.24991 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:24:01.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:24:00 vm08 ceph-mon[49535]: pgmap v51: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:24:01.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:24:00 vm08 ceph-mon[49535]: from='client.24991 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:24:03.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:24:02 vm00 ceph-mon[47364]: pgmap v52: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:24:03.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:24:02 vm00 ceph-mon[51670]: pgmap v52: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:24:03.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:24:02 vm08 ceph-mon[49535]: pgmap v52: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:24:06.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:24:05 vm00 ceph-mon[47364]: pgmap v53: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:24:06.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:24:05 vm00 ceph-mon[51670]: pgmap v53: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:24:06.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:24:05 vm08 ceph-mon[49535]: pgmap v53: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:24:06.976 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:24:06 vm00 ceph-mon[47364]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:24:06.977 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:24:06 vm00 ceph-mon[51670]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:24:07.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:24:06 vm08 ceph-mon[49535]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:24:07.252 
INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:24:06 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:24:06.976Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:24:07.252 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:24:06 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:24:06.977Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:24:08.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:24:07 vm00 ceph-mon[47364]: pgmap v54: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:24:08.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:24:07 vm00 ceph-mon[51670]: pgmap v54: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:24:08.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:24:07 vm08 ceph-mon[49535]: pgmap v54: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:24:09.520 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:24:09 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:24:09] "GET /metrics HTTP/1.1" 200 37539 "" "Prometheus/2.51.0" 2026-03-10T13:24:10.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:24:09 vm00 ceph-mon[47364]: pgmap v55: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:24:10.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:24:09 vm00 ceph-mon[51670]: pgmap v55: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:24:10.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:24:09 vm08 ceph-mon[49535]: pgmap v55: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:24:12.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:24:11 vm00 ceph-mon[47364]: pgmap v56: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:24:12.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:24:11 vm00 ceph-mon[47364]: from='client.24991 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:24:12.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:24:11 vm00 ceph-mon[51670]: pgmap v56: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:24:12.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:24:11 vm00 ceph-mon[51670]: from='client.24991 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:24:12.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:24:11 vm08 ceph-mon[49535]: pgmap v56: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB 
avail; 853 B/s rd, 0 op/s 2026-03-10T13:24:12.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:24:11 vm08 ceph-mon[49535]: from='client.24991 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:24:14.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:24:13 vm00 ceph-mon[47364]: pgmap v57: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:24:14.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:24:13 vm00 ceph-mon[51670]: pgmap v57: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:24:14.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:24:13 vm08 ceph-mon[49535]: pgmap v57: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:24:16.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:24:15 vm00 ceph-mon[47364]: pgmap v58: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:24:16.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:24:15 vm00 ceph-mon[51670]: pgmap v58: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:24:16.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:24:15 vm08 ceph-mon[49535]: pgmap v58: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:24:17.252 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:24:16 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:24:16.976Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:24:17.252 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:24:16 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:24:16.977Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:24:18.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:24:17 vm08 ceph-mon[49535]: pgmap v59: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:24:18.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:24:17 vm00 ceph-mon[47364]: pgmap v59: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:24:18.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:24:17 vm00 ceph-mon[51670]: pgmap v59: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:24:19.520 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:24:19 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:24:19] "GET /metrics HTTP/1.1" 200 37539 "" "Prometheus/2.51.0" 2026-03-10T13:24:20.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:24:19 vm08 ceph-mon[49535]: pgmap v60: 161 pgs: 161 active+clean; 457 KiB 
data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:24:20.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:24:19 vm00 ceph-mon[47364]: pgmap v60: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:24:20.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:24:19 vm00 ceph-mon[51670]: pgmap v60: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:24:22.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:24:21 vm08 ceph-mon[49535]: pgmap v61: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:24:22.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:24:21 vm08 ceph-mon[49535]: from='client.24991 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:24:22.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:24:21 vm08 ceph-mon[49535]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:24:22.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:24:21 vm00 ceph-mon[47364]: pgmap v61: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:24:22.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:24:21 vm00 ceph-mon[47364]: from='client.24991 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:24:22.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:24:21 vm00 ceph-mon[47364]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:24:22.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:24:21 vm00 ceph-mon[51670]: pgmap v61: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:24:22.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:24:21 vm00 ceph-mon[51670]: from='client.24991 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:24:22.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:24:21 vm00 ceph-mon[51670]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:24:24.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:24:23 vm08 ceph-mon[49535]: pgmap v62: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:24:24.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:24:23 vm00 ceph-mon[47364]: pgmap v62: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:24:24.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:24:23 vm00 ceph-mon[51670]: pgmap v62: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:24:25.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:24:24 vm00 ceph-mon[47364]: pgmap v63: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:24:25.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:24:24 vm00 ceph-mon[51670]: pgmap v63: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 
2026-03-10T13:24:25.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:24:24 vm08 ceph-mon[49535]: pgmap v63: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:24:27.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:24:26 vm00 ceph-mon[47364]: pgmap v64: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:24:27.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:24:26 vm00 ceph-mon[51670]: pgmap v64: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:24:27.253 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:24:26 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:24:26.977Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:24:27.253 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:24:26 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:24:26.978Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:24:27.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:24:26 vm08 ceph-mon[49535]: pgmap v64: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:24:29.520 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:24:29 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:24:29] "GET /metrics HTTP/1.1" 200 37540 "" "Prometheus/2.51.0" 2026-03-10T13:24:30.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:24:29 vm00 ceph-mon[47364]: pgmap v65: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:24:30.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:24:29 vm00 ceph-mon[51670]: pgmap v65: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:24:30.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:24:29 vm08 ceph-mon[49535]: pgmap v65: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:24:32.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:24:31 vm00 ceph-mon[47364]: pgmap v66: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:24:32.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:24:31 vm00 ceph-mon[47364]: from='client.24991 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:24:32.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:24:31 vm00 ceph-mon[51670]: pgmap v66: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:24:32.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:24:31 vm00 ceph-mon[51670]: from='client.24991 -' entity='client.iscsi.foo.vm00.dezodo' 
cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:24:32.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:24:31 vm08 ceph-mon[49535]: pgmap v66: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:24:32.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:24:31 vm08 ceph-mon[49535]: from='client.24991 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:24:34.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:24:33 vm00 ceph-mon[47364]: pgmap v67: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:24:34.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:24:33 vm00 ceph-mon[51670]: pgmap v67: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:24:34.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:24:33 vm08 ceph-mon[49535]: pgmap v67: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:24:35.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:24:34 vm00 ceph-mon[47364]: pgmap v68: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:24:35.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:24:34 vm00 ceph-mon[51670]: pgmap v68: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:24:35.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:24:34 vm08 ceph-mon[49535]: pgmap v68: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:24:37.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:24:36 vm00 ceph-mon[47364]: pgmap v69: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:24:37.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:24:36 vm00 ceph-mon[47364]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:24:37.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:24:36 vm00 ceph-mon[51670]: pgmap v69: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:24:37.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:24:36 vm00 ceph-mon[51670]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:24:37.253 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:24:36 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:24:36.977Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:24:37.253 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:24:36 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:24:36.978Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post 
\"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:24:37.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:24:36 vm08 ceph-mon[49535]: pgmap v69: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:24:37.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:24:36 vm08 ceph-mon[49535]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:24:39.520 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:24:39 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:24:39] "GET /metrics HTTP/1.1" 200 37538 "" "Prometheus/2.51.0" 2026-03-10T13:24:40.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:24:39 vm00 ceph-mon[47364]: pgmap v70: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:24:40.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:24:39 vm00 ceph-mon[51670]: pgmap v70: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:24:40.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:24:39 vm08 ceph-mon[49535]: pgmap v70: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:24:42.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:24:41 vm00 ceph-mon[47364]: pgmap v71: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:24:42.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:24:41 vm00 ceph-mon[47364]: from='client.24991 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:24:42.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:24:41 vm00 ceph-mon[51670]: pgmap v71: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:24:42.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:24:41 vm00 ceph-mon[51670]: from='client.24991 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:24:42.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:24:41 vm08 ceph-mon[49535]: pgmap v71: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:24:42.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:24:41 vm08 ceph-mon[49535]: from='client.24991 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:24:44.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:24:43 vm00 ceph-mon[47364]: pgmap v72: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:24:44.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:24:43 vm00 ceph-mon[51670]: pgmap v72: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:24:44.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:24:43 vm08 ceph-mon[49535]: pgmap v72: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:24:45.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:24:44 vm00 ceph-mon[47364]: from='mgr.24824 192.168.123.108:0/1835423243' 
entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:24:45.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:24:44 vm00 ceph-mon[47364]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:24:45.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:24:44 vm00 ceph-mon[47364]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:24:45.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:24:44 vm00 ceph-mon[51670]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:24:45.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:24:44 vm00 ceph-mon[51670]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:24:45.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:24:44 vm00 ceph-mon[51670]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:24:45.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:24:44 vm08 ceph-mon[49535]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:24:45.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:24:44 vm08 ceph-mon[49535]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:24:45.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:24:44 vm08 ceph-mon[49535]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:24:46.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:24:45 vm08 ceph-mon[49535]: pgmap v73: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:24:46.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:24:45 vm00 ceph-mon[47364]: pgmap v73: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:24:46.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:24:45 vm00 ceph-mon[51670]: pgmap v73: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:24:47.252 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:24:46 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:24:46.978Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:24:47.253 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:24:46 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:24:46.978Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:24:48.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:24:47 vm00 ceph-mon[47364]: pgmap v74: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:24:48.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:24:47 vm00 ceph-mon[51670]: 
pgmap v74: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:24:48.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:24:47 vm08 ceph-mon[49535]: pgmap v74: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:24:49.520 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:24:49 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:24:49] "GET /metrics HTTP/1.1" 200 37538 "" "Prometheus/2.51.0" 2026-03-10T13:24:50.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:24:49 vm00 ceph-mon[47364]: pgmap v75: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:24:50.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:24:49 vm00 ceph-mon[51670]: pgmap v75: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:24:50.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:24:49 vm08 ceph-mon[49535]: pgmap v75: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:24:51.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:24:50 vm00 ceph-mon[47364]: pgmap v76: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:24:51.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:24:50 vm00 ceph-mon[47364]: from='client.24991 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:24:51.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:24:50 vm00 ceph-mon[51670]: pgmap v76: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:24:51.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:24:50 vm00 ceph-mon[51670]: from='client.24991 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:24:51.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:24:50 vm08 ceph-mon[49535]: pgmap v76: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:24:51.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:24:50 vm08 ceph-mon[49535]: from='client.24991 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:24:52.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:24:51 vm00 ceph-mon[47364]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:24:52.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:24:51 vm00 ceph-mon[51670]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:24:52.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:24:51 vm08 ceph-mon[49535]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:24:53.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:24:52 vm00 ceph-mon[47364]: pgmap v77: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:24:53.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:24:52 vm00 ceph-mon[51670]: pgmap v77: 161 pgs: 161 active+clean; 457 KiB 
data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:24:53.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:24:52 vm08 ceph-mon[49535]: pgmap v77: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:24:55.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:24:54 vm00 ceph-mon[47364]: pgmap v78: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:24:55.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:24:54 vm00 ceph-mon[51670]: pgmap v78: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:24:55.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:24:54 vm08 ceph-mon[49535]: pgmap v78: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:24:57.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:24:56 vm00 ceph-mon[47364]: pgmap v79: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:24:57.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:24:56 vm00 ceph-mon[51670]: pgmap v79: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:24:57.252 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:24:56 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:24:56.978Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:24:57.252 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:24:56 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:24:56.979Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:24:57.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:24:56 vm08 ceph-mon[49535]: pgmap v79: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:24:59.520 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:24:59 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:24:59] "GET /metrics HTTP/1.1" 200 37551 "" "Prometheus/2.51.0" 2026-03-10T13:25:00.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:24:59 vm00 ceph-mon[47364]: pgmap v80: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:25:00.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:24:59 vm00 ceph-mon[51670]: pgmap v80: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:25:00.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:24:59 vm08 ceph-mon[49535]: pgmap v80: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:25:01.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:00 vm00 ceph-mon[47364]: pgmap v81: 161 
pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:25:01.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:00 vm00 ceph-mon[47364]: from='client.24991 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:25:01.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:00 vm00 ceph-mon[51670]: pgmap v81: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:25:01.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:00 vm00 ceph-mon[51670]: from='client.24991 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:25:01.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:00 vm08 ceph-mon[49535]: pgmap v81: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:25:01.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:00 vm08 ceph-mon[49535]: from='client.24991 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:25:03.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:02 vm00 ceph-mon[47364]: pgmap v82: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:25:03.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:02 vm00 ceph-mon[51670]: pgmap v82: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:25:03.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:02 vm08 ceph-mon[49535]: pgmap v82: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:25:06.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:05 vm08 ceph-mon[49535]: pgmap v83: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:25:06.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:05 vm00 ceph-mon[47364]: pgmap v83: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:25:06.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:05 vm00 ceph-mon[51670]: pgmap v83: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:25:07.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:06 vm00 ceph-mon[47364]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:25:07.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:06 vm00 ceph-mon[51670]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:25:07.252 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:25:06 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:25:06.979Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:25:07.252 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:25:06 vm00 
ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:25:06.980Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:25:07.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:06 vm08 ceph-mon[49535]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:25:08.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:07 vm00 ceph-mon[47364]: pgmap v84: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:25:08.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:07 vm00 ceph-mon[51670]: pgmap v84: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:25:08.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:07 vm08 ceph-mon[49535]: pgmap v84: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:25:09.520 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:09 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:25:09] "GET /metrics HTTP/1.1" 200 37550 "" "Prometheus/2.51.0" 2026-03-10T13:25:10.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:09 vm08 ceph-mon[49535]: pgmap v85: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:25:10.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:09 vm00 ceph-mon[47364]: pgmap v85: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:25:10.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:09 vm00 ceph-mon[51670]: pgmap v85: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:25:12.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:11 vm08 ceph-mon[49535]: pgmap v86: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:25:12.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:11 vm08 ceph-mon[49535]: from='client.24991 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:25:12.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:11 vm00 ceph-mon[47364]: pgmap v86: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:25:12.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:11 vm00 ceph-mon[47364]: from='client.24991 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:25:12.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:11 vm00 ceph-mon[51670]: pgmap v86: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:25:12.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:11 vm00 ceph-mon[51670]: from='client.24991 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:25:14.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:13 vm08 
ceph-mon[49535]: pgmap v87: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:25:14.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:13 vm00 ceph-mon[47364]: pgmap v87: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:25:14.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:13 vm00 ceph-mon[51670]: pgmap v87: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:25:16.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:15 vm00 ceph-mon[47364]: pgmap v88: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:25:16.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:15 vm00 ceph-mon[51670]: pgmap v88: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:25:16.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:15 vm08 ceph-mon[49535]: pgmap v88: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:25:17.154 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:16 vm00 ceph-mon[47364]: pgmap v89: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:25:17.154 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:25:16 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:25:16.980Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:25:17.154 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:25:16 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:25:16.981Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:25:17.154 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:16 vm00 ceph-mon[51670]: pgmap v89: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:25:17.171 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:16 vm08 ceph-mon[49535]: pgmap v89: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:25:19.520 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:19 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:25:19] "GET /metrics HTTP/1.1" 200 37550 "" "Prometheus/2.51.0" 2026-03-10T13:25:20.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:19 vm08 ceph-mon[49535]: pgmap v90: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:25:20.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:19 vm00 ceph-mon[47364]: pgmap v90: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:25:20.252 
INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:19 vm00 ceph-mon[51670]: pgmap v90: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:25:22.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:21 vm08 ceph-mon[49535]: pgmap v91: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:25:22.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:21 vm08 ceph-mon[49535]: from='client.24991 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:25:22.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:21 vm08 ceph-mon[49535]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:25:22.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:21 vm00 ceph-mon[47364]: pgmap v91: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:25:22.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:21 vm00 ceph-mon[47364]: from='client.24991 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:25:22.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:21 vm00 ceph-mon[47364]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:25:22.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:21 vm00 ceph-mon[51670]: pgmap v91: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:25:22.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:21 vm00 ceph-mon[51670]: from='client.24991 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:25:22.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:21 vm00 ceph-mon[51670]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:25:24.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:23 vm08 ceph-mon[49535]: pgmap v92: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:25:24.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:23 vm00 ceph-mon[47364]: pgmap v92: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:25:24.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:23 vm00 ceph-mon[51670]: pgmap v92: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:25:26.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:25 vm00 ceph-mon[47364]: pgmap v93: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:25:26.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:25 vm00 ceph-mon[51670]: pgmap v93: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:25:26.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:25 vm08 ceph-mon[49535]: pgmap v93: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:25:27.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:26 vm00 ceph-mon[47364]: pgmap 
v94: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:25:27.252 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:25:26 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:25:26.980Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:25:27.252 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:25:26 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:25:26.981Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:25:27.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:26 vm00 ceph-mon[51670]: pgmap v94: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:25:27.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:26 vm08 ceph-mon[49535]: pgmap v94: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:25:29.520 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:29 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:25:29] "GET /metrics HTTP/1.1" 200 37552 "" "Prometheus/2.51.0" 2026-03-10T13:25:30.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:29 vm08 ceph-mon[49535]: pgmap v95: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:25:30.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:29 vm00 ceph-mon[47364]: pgmap v95: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:25:30.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:29 vm00 ceph-mon[51670]: pgmap v95: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:25:32.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:31 vm08 ceph-mon[49535]: pgmap v96: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:25:32.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:31 vm08 ceph-mon[49535]: from='client.24991 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:25:32.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:31 vm00 ceph-mon[47364]: pgmap v96: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:25:32.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:31 vm00 ceph-mon[47364]: from='client.24991 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:25:32.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:31 vm00 ceph-mon[51670]: pgmap v96: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:25:32.252 
INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:31 vm00 ceph-mon[51670]: from='client.24991 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:25:34.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:33 vm00 ceph-mon[47364]: pgmap v97: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:25:34.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:33 vm00 ceph-mon[51670]: pgmap v97: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:25:34.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:33 vm08 ceph-mon[49535]: pgmap v97: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:25:36.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:35 vm00 ceph-mon[47364]: pgmap v98: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:25:36.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:35 vm00 ceph-mon[51670]: pgmap v98: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:25:36.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:35 vm08 ceph-mon[49535]: pgmap v98: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:25:36.374 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch ps' 2026-03-10T13:25:36.870 INFO:teuthology.orchestra.run.vm00.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T13:25:36.870 INFO:teuthology.orchestra.run.vm00.stdout:alertmanager.a vm00 *:9093,9094 running (12m) 2m ago 18m 26.4M - 0.25.0 c8568f914cd2 12fde3cf83cb 2026-03-10T13:25:36.870 INFO:teuthology.orchestra.run.vm00.stdout:grafana.a vm08 *:3000 running (11m) 2m ago 18m 46.9M - dad864ee21e9 263cac442a99 2026-03-10T13:25:36.870 INFO:teuthology.orchestra.run.vm00.stdout:iscsi.foo.vm00.dezodo vm00 running (2m) 2m ago 18m 49.0M - 3.5 e1d6a67b021e f4f174d57c49 2026-03-10T13:25:36.870 INFO:teuthology.orchestra.run.vm00.stdout:mgr.x vm08 *:8443,9283 running (14m) 2m ago 20m 544M - 19.2.3-678-ge911bdeb 654f31e6858e 62b908c184a8 2026-03-10T13:25:36.870 INFO:teuthology.orchestra.run.vm00.stdout:mgr.y vm00 *:8443,9283,8765 running (6m) 2m ago 21m 487M - 19.2.3-678-ge911bdeb 654f31e6858e 5bc576d4d32b 2026-03-10T13:25:36.870 INFO:teuthology.orchestra.run.vm00.stdout:mon.a vm00 running (21m) 2m ago 21m 69.0M 2048M 17.2.0 e1d6a67b021e f0e3f322471c 2026-03-10T13:25:36.870 INFO:teuthology.orchestra.run.vm00.stdout:mon.b vm08 running (20m) 2m ago 20m 54.5M 2048M 17.2.0 e1d6a67b021e d3c1458bc898 2026-03-10T13:25:36.870 INFO:teuthology.orchestra.run.vm00.stdout:mon.c vm00 running (20m) 2m ago 20m 50.9M 2048M 17.2.0 e1d6a67b021e d00b7fd44c23 2026-03-10T13:25:36.870 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.a vm00 *:9100 running (12m) 2m ago 18m 10.4M - 1.7.0 72c9c2088986 bcf883401619 2026-03-10T13:25:36.870 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.b vm08 *:9100 running (11m) 2m ago 18m 10.0M - 1.7.0 72c9c2088986 4ac83f03f818 2026-03-10T13:25:36.870 
INFO:teuthology.orchestra.run.vm00.stdout:osd.0 vm00 running (20m) 2m ago 20m 52.1M 4096M 17.2.0 e1d6a67b021e 2919c7073fa7 2026-03-10T13:25:36.870 INFO:teuthology.orchestra.run.vm00.stdout:osd.1 vm00 running (19m) 2m ago 19m 56.8M 4096M 17.2.0 e1d6a67b021e 647927dc41ea 2026-03-10T13:25:36.871 INFO:teuthology.orchestra.run.vm00.stdout:osd.2 vm00 running (19m) 2m ago 19m 54.2M 4096M 17.2.0 e1d6a67b021e 1e417e82c2b9 2026-03-10T13:25:36.871 INFO:teuthology.orchestra.run.vm00.stdout:osd.3 vm00 running (19m) 2m ago 19m 52.7M 4096M 17.2.0 e1d6a67b021e 98bb6d7ea69f 2026-03-10T13:25:36.871 INFO:teuthology.orchestra.run.vm00.stdout:osd.4 vm08 running (19m) 2m ago 19m 54.5M 4096M 17.2.0 e1d6a67b021e e349440ca776 2026-03-10T13:25:36.871 INFO:teuthology.orchestra.run.vm00.stdout:osd.5 vm08 running (19m) 2m ago 19m 55.6M 4096M 17.2.0 e1d6a67b021e 9a6a2a67cc99 2026-03-10T13:25:36.871 INFO:teuthology.orchestra.run.vm00.stdout:osd.6 vm08 running (19m) 2m ago 19m 51.4M 4096M 17.2.0 e1d6a67b021e c27676916d52 2026-03-10T13:25:36.871 INFO:teuthology.orchestra.run.vm00.stdout:osd.7 vm08 running (19m) 2m ago 19m 53.2M 4096M 17.2.0 e1d6a67b021e bf67951990a5 2026-03-10T13:25:36.871 INFO:teuthology.orchestra.run.vm00.stdout:prometheus.a vm08 *:9095 running (2m) 2m ago 18m 46.6M - 2.51.0 1d3b7f56885b e1b806e63eed 2026-03-10T13:25:36.871 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm00.tvlvzo vm00 *:8000 running (18m) 2m ago 18m 95.8M - 17.2.0 e1d6a67b021e be24eac16807 2026-03-10T13:25:36.871 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm08.ljayps vm08 *:8000 running (18m) 2m ago 18m 92.8M - 17.2.0 e1d6a67b021e 67b525427823 2026-03-10T13:25:36.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:36 vm00 ceph-mon[47364]: pgmap v99: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:25:36.871 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:36 vm00 ceph-mon[47364]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:25:36.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:36 vm00 ceph-mon[51670]: pgmap v99: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:25:36.871 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:36 vm00 ceph-mon[51670]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:25:36.936 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch ls' 2026-03-10T13:25:37.136 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:25:36 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:25:36.981Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:25:37.136 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:25:36 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:25:36.982Z caller=notify.go:732 level=warn 
component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:25:37.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:36 vm08 ceph-mon[49535]: pgmap v99: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:25:37.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:36 vm08 ceph-mon[49535]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:25:37.380 INFO:teuthology.orchestra.run.vm00.stdout:NAME PORTS RUNNING REFRESHED AGE PLACEMENT 2026-03-10T13:25:37.381 INFO:teuthology.orchestra.run.vm00.stdout:alertmanager ?:9093,9094 1/1 2m ago 18m vm00=a;count:1 2026-03-10T13:25:37.381 INFO:teuthology.orchestra.run.vm00.stdout:grafana ?:3000 1/1 2m ago 18m vm08=a;count:1 2026-03-10T13:25:37.381 INFO:teuthology.orchestra.run.vm00.stdout:iscsi.foo ?:5000 1/1 2m ago 18m count:1 2026-03-10T13:25:37.381 INFO:teuthology.orchestra.run.vm00.stdout:mgr 2/2 2m ago 20m vm00=y;vm08=x;count:2 2026-03-10T13:25:37.381 INFO:teuthology.orchestra.run.vm00.stdout:mon 3/3 2m ago 20m vm00:192.168.123.100=a;vm00:[v2:192.168.123.100:3301,v1:192.168.123.100:6790]=c;vm08:192.168.123.108=b;count:3 2026-03-10T13:25:37.381 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter ?:9100 2/2 2m ago 18m vm00=a;vm08=b;count:2 2026-03-10T13:25:37.381 INFO:teuthology.orchestra.run.vm00.stdout:osd 8 2m ago - 2026-03-10T13:25:37.381 INFO:teuthology.orchestra.run.vm00.stdout:prometheus ?:9095 1/1 2m ago 18m vm08=a;count:1 2026-03-10T13:25:37.381 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo ?:8000 2/2 2m ago 18m count:2 2026-03-10T13:25:37.431 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph versions' 2026-03-10T13:25:37.933 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:37 vm00 ceph-mon[47364]: from='client.15183 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:25:37.933 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:37 vm00 ceph-mon[47364]: from='client.25036 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:25:37.933 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:37 vm00 ceph-mon[51670]: from='client.15183 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:25:37.933 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:37 vm00 ceph-mon[51670]: from='client.25036 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:25:37.934 INFO:teuthology.orchestra.run.vm00.stdout:{ 2026-03-10T13:25:37.934 INFO:teuthology.orchestra.run.vm00.stdout: "mon": { 2026-03-10T13:25:37.934 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 3 2026-03-10T13:25:37.934 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T13:25:37.934 INFO:teuthology.orchestra.run.vm00.stdout: "mgr": { 2026-03-10T13:25:37.934 
INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-10T13:25:37.934 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T13:25:37.934 INFO:teuthology.orchestra.run.vm00.stdout: "osd": { 2026-03-10T13:25:37.934 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 8 2026-03-10T13:25:37.934 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T13:25:37.934 INFO:teuthology.orchestra.run.vm00.stdout: "mds": {}, 2026-03-10T13:25:37.934 INFO:teuthology.orchestra.run.vm00.stdout: "rgw": { 2026-03-10T13:25:37.934 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 2 2026-03-10T13:25:37.934 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T13:25:37.934 INFO:teuthology.orchestra.run.vm00.stdout: "overall": { 2026-03-10T13:25:37.934 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 13, 2026-03-10T13:25:37.934 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-10T13:25:37.934 INFO:teuthology.orchestra.run.vm00.stdout: } 2026-03-10T13:25:37.934 INFO:teuthology.orchestra.run.vm00.stdout:} 2026-03-10T13:25:37.993 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types mgr' 2026-03-10T13:25:38.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:37 vm08 ceph-mon[49535]: from='client.15183 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:25:38.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:37 vm08 ceph-mon[49535]: from='client.25036 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:25:38.868 INFO:teuthology.orchestra.run.vm00.stdout:Initiating upgrade to quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:25:38.919 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'while ceph orch upgrade status | jq '"'"'.in_progress'"'"' | grep true && ! ceph orch upgrade status | jq '"'"'.message'"'"' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done' 2026-03-10T13:25:39.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:38 vm00 ceph-mon[47364]: from='client.? 
192.168.123.100:0/55571887' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:25:39.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:38 vm00 ceph-mon[47364]: pgmap v100: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:25:39.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:38 vm00 ceph-mon[47364]: from='client.25045 -' entity='client.admin' cmd=[{"prefix": "orch upgrade start", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "daemon_types": "mgr", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:25:39.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:38 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/55571887' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:25:39.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:38 vm00 ceph-mon[51670]: pgmap v100: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:25:39.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:38 vm00 ceph-mon[51670]: from='client.25045 -' entity='client.admin' cmd=[{"prefix": "orch upgrade start", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "daemon_types": "mgr", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:25:39.099 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:38 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/55571887' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:25:39.099 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:38 vm08 ceph-mon[49535]: pgmap v100: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:25:39.099 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:38 vm08 ceph-mon[49535]: from='client.25045 -' entity='client.admin' cmd=[{"prefix": "orch upgrade start", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "daemon_types": "mgr", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:25:39.496 INFO:teuthology.orchestra.run.vm00.stdout:true 2026-03-10T13:25:39.520 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:39 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:25:39] "GET /metrics HTTP/1.1" 200 37550 "" "Prometheus/2.51.0" 2026-03-10T13:25:39.902 INFO:teuthology.orchestra.run.vm00.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T13:25:39.902 INFO:teuthology.orchestra.run.vm00.stdout:alertmanager.a vm00 *:9093,9094 running (12m) 2m ago 18m 26.4M - 0.25.0 c8568f914cd2 12fde3cf83cb 2026-03-10T13:25:39.902 INFO:teuthology.orchestra.run.vm00.stdout:grafana.a vm08 *:3000 running (11m) 2m ago 18m 46.9M - dad864ee21e9 263cac442a99 2026-03-10T13:25:39.902 INFO:teuthology.orchestra.run.vm00.stdout:iscsi.foo.vm00.dezodo vm00 running (2m) 2m ago 18m 49.0M - 3.5 e1d6a67b021e f4f174d57c49 2026-03-10T13:25:39.902 INFO:teuthology.orchestra.run.vm00.stdout:mgr.x vm08 *:8443,9283 running (14m) 2m ago 20m 544M - 19.2.3-678-ge911bdeb 654f31e6858e 62b908c184a8 2026-03-10T13:25:39.902 INFO:teuthology.orchestra.run.vm00.stdout:mgr.y vm00 *:8443,9283,8765 running (6m) 2m ago 21m 487M - 19.2.3-678-ge911bdeb 654f31e6858e 5bc576d4d32b 2026-03-10T13:25:39.902 INFO:teuthology.orchestra.run.vm00.stdout:mon.a vm00 running (21m) 2m ago 21m 69.0M 2048M 17.2.0 e1d6a67b021e f0e3f322471c 2026-03-10T13:25:39.902 
INFO:teuthology.orchestra.run.vm00.stdout:mon.b vm08 running (20m) 2m ago 20m 54.5M 2048M 17.2.0 e1d6a67b021e d3c1458bc898 2026-03-10T13:25:39.902 INFO:teuthology.orchestra.run.vm00.stdout:mon.c vm00 running (20m) 2m ago 20m 50.9M 2048M 17.2.0 e1d6a67b021e d00b7fd44c23 2026-03-10T13:25:39.902 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.a vm00 *:9100 running (12m) 2m ago 18m 10.4M - 1.7.0 72c9c2088986 bcf883401619 2026-03-10T13:25:39.902 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.b vm08 *:9100 running (12m) 2m ago 18m 10.0M - 1.7.0 72c9c2088986 4ac83f03f818 2026-03-10T13:25:39.902 INFO:teuthology.orchestra.run.vm00.stdout:osd.0 vm00 running (20m) 2m ago 20m 52.1M 4096M 17.2.0 e1d6a67b021e 2919c7073fa7 2026-03-10T13:25:39.902 INFO:teuthology.orchestra.run.vm00.stdout:osd.1 vm00 running (20m) 2m ago 20m 56.8M 4096M 17.2.0 e1d6a67b021e 647927dc41ea 2026-03-10T13:25:39.902 INFO:teuthology.orchestra.run.vm00.stdout:osd.2 vm00 running (19m) 2m ago 19m 54.2M 4096M 17.2.0 e1d6a67b021e 1e417e82c2b9 2026-03-10T13:25:39.902 INFO:teuthology.orchestra.run.vm00.stdout:osd.3 vm00 running (19m) 2m ago 19m 52.7M 4096M 17.2.0 e1d6a67b021e 98bb6d7ea69f 2026-03-10T13:25:39.902 INFO:teuthology.orchestra.run.vm00.stdout:osd.4 vm08 running (19m) 2m ago 19m 54.5M 4096M 17.2.0 e1d6a67b021e e349440ca776 2026-03-10T13:25:39.902 INFO:teuthology.orchestra.run.vm00.stdout:osd.5 vm08 running (19m) 2m ago 19m 55.6M 4096M 17.2.0 e1d6a67b021e 9a6a2a67cc99 2026-03-10T13:25:39.902 INFO:teuthology.orchestra.run.vm00.stdout:osd.6 vm08 running (19m) 2m ago 19m 51.4M 4096M 17.2.0 e1d6a67b021e c27676916d52 2026-03-10T13:25:39.902 INFO:teuthology.orchestra.run.vm00.stdout:osd.7 vm08 running (19m) 2m ago 19m 53.2M 4096M 17.2.0 e1d6a67b021e bf67951990a5 2026-03-10T13:25:39.902 INFO:teuthology.orchestra.run.vm00.stdout:prometheus.a vm08 *:9095 running (2m) 2m ago 18m 46.6M - 2.51.0 1d3b7f56885b e1b806e63eed 2026-03-10T13:25:39.902 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm00.tvlvzo vm00 *:8000 running (18m) 2m ago 18m 95.8M - 17.2.0 e1d6a67b021e be24eac16807 2026-03-10T13:25:39.902 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm08.ljayps vm08 *:8000 running (18m) 2m ago 18m 92.8M - 17.2.0 e1d6a67b021e 67b525427823 2026-03-10T13:25:40.130 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:39 vm00 ceph-mon[47364]: Upgrade: Started with target quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:25:40.131 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:39 vm00 ceph-mon[47364]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:25:40.131 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:39 vm00 ceph-mon[47364]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:25:40.131 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:39 vm00 ceph-mon[47364]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:25:40.131 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:39 vm00 ceph-mon[47364]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:25:40.131 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:39 vm00 ceph-mon[47364]: Upgrade: First pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:25:40.131 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:39 vm00 ceph-mon[47364]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:25:40.131 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 
10 13:25:39 vm00 ceph-mon[47364]: Upgrade: Target is version 19.2.3-678-ge911bdeb (squid) 2026-03-10T13:25:40.131 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:39 vm00 ceph-mon[47364]: Upgrade: Target container is quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, digests ['quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc'] 2026-03-10T13:25:40.131 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:39 vm00 ceph-mon[47364]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:25:40.131 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:39 vm00 ceph-mon[47364]: Upgrade: Need to upgrade myself (mgr.x) 2026-03-10T13:25:40.131 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:39 vm00 ceph-mon[47364]: Upgrade: Need to upgrade myself (mgr.x) 2026-03-10T13:25:40.131 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:39 vm00 ceph-mon[47364]: Failing over to other MGR 2026-03-10T13:25:40.131 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:39 vm00 ceph-mon[47364]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "mgr fail", "who": "x"}]: dispatch 2026-03-10T13:25:40.131 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:39 vm00 ceph-mon[47364]: from='mgr.24824 ' entity='mgr.x' cmd=[{"prefix": "mgr fail", "who": "x"}]: dispatch 2026-03-10T13:25:40.131 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:39 vm00 ceph-mon[47364]: osdmap e94: 8 total, 8 up, 8 in 2026-03-10T13:25:40.131 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:39 vm00 ceph-mon[47364]: from='client.24959 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:25:40.131 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:39 vm00 ceph-mon[47364]: from='client.15201 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:25:40.131 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:39 vm00 ceph-mon[51670]: Upgrade: Started with target quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:25:40.131 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:39 vm00 ceph-mon[51670]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:25:40.131 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:39 vm00 ceph-mon[51670]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:25:40.131 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:39 vm00 ceph-mon[51670]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:25:40.131 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:39 vm00 ceph-mon[51670]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:25:40.131 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:39 vm00 ceph-mon[51670]: Upgrade: First pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:25:40.131 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:39 vm00 ceph-mon[51670]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:25:40.131 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:39 vm00 ceph-mon[51670]: Upgrade: Target is version 19.2.3-678-ge911bdeb (squid) 2026-03-10T13:25:40.131 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:39 vm00 ceph-mon[51670]: Upgrade: Target container is 
quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, digests ['quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc'] 2026-03-10T13:25:40.131 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:39 vm00 ceph-mon[51670]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:25:40.131 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:39 vm00 ceph-mon[51670]: Upgrade: Need to upgrade myself (mgr.x) 2026-03-10T13:25:40.131 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:39 vm00 ceph-mon[51670]: Upgrade: Need to upgrade myself (mgr.x) 2026-03-10T13:25:40.131 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:39 vm00 ceph-mon[51670]: Failing over to other MGR 2026-03-10T13:25:40.131 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:39 vm00 ceph-mon[51670]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "mgr fail", "who": "x"}]: dispatch 2026-03-10T13:25:40.131 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:39 vm00 ceph-mon[51670]: from='mgr.24824 ' entity='mgr.x' cmd=[{"prefix": "mgr fail", "who": "x"}]: dispatch 2026-03-10T13:25:40.131 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:39 vm00 ceph-mon[51670]: osdmap e94: 8 total, 8 up, 8 in 2026-03-10T13:25:40.131 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:39 vm00 ceph-mon[51670]: from='client.24959 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:25:40.131 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:39 vm00 ceph-mon[51670]: from='client.15201 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:25:40.132 INFO:teuthology.orchestra.run.vm00.stdout:{ 2026-03-10T13:25:40.132 INFO:teuthology.orchestra.run.vm00.stdout: "mon": { 2026-03-10T13:25:40.133 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 3 2026-03-10T13:25:40.133 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T13:25:40.133 INFO:teuthology.orchestra.run.vm00.stdout: "mgr": { 2026-03-10T13:25:40.133 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-10T13:25:40.133 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T13:25:40.133 INFO:teuthology.orchestra.run.vm00.stdout: "osd": { 2026-03-10T13:25:40.133 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 8 2026-03-10T13:25:40.133 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T13:25:40.133 INFO:teuthology.orchestra.run.vm00.stdout: "mds": {}, 2026-03-10T13:25:40.133 INFO:teuthology.orchestra.run.vm00.stdout: "rgw": { 2026-03-10T13:25:40.133 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 2 2026-03-10T13:25:40.133 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T13:25:40.133 INFO:teuthology.orchestra.run.vm00.stdout: "overall": { 2026-03-10T13:25:40.133 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 13, 2026-03-10T13:25:40.133 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-10T13:25:40.133 
INFO:teuthology.orchestra.run.vm00.stdout: } 2026-03-10T13:25:40.133 INFO:teuthology.orchestra.run.vm00.stdout:} 2026-03-10T13:25:40.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:39 vm08 ceph-mon[49535]: Upgrade: Started with target quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:25:40.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:39 vm08 ceph-mon[49535]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:25:40.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:39 vm08 ceph-mon[49535]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:25:40.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:39 vm08 ceph-mon[49535]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:25:40.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:39 vm08 ceph-mon[49535]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:25:40.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:39 vm08 ceph-mon[49535]: Upgrade: First pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:25:40.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:39 vm08 ceph-mon[49535]: from='mgr.24824 ' entity='mgr.x' 2026-03-10T13:25:40.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:39 vm08 ceph-mon[49535]: Upgrade: Target is version 19.2.3-678-ge911bdeb (squid) 2026-03-10T13:25:40.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:39 vm08 ceph-mon[49535]: Upgrade: Target container is quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, digests ['quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc'] 2026-03-10T13:25:40.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:39 vm08 ceph-mon[49535]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:25:40.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:39 vm08 ceph-mon[49535]: Upgrade: Need to upgrade myself (mgr.x) 2026-03-10T13:25:40.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:39 vm08 ceph-mon[49535]: Upgrade: Need to upgrade myself (mgr.x) 2026-03-10T13:25:40.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:39 vm08 ceph-mon[49535]: Failing over to other MGR 2026-03-10T13:25:40.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:39 vm08 ceph-mon[49535]: from='mgr.24824 192.168.123.108:0/1835423243' entity='mgr.x' cmd=[{"prefix": "mgr fail", "who": "x"}]: dispatch 2026-03-10T13:25:40.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:39 vm08 ceph-mon[49535]: from='mgr.24824 ' entity='mgr.x' cmd=[{"prefix": "mgr fail", "who": "x"}]: dispatch 2026-03-10T13:25:40.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:39 vm08 ceph-mon[49535]: osdmap e94: 8 total, 8 up, 8 in 2026-03-10T13:25:40.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:39 vm08 ceph-mon[49535]: from='client.24959 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:25:40.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:39 vm08 ceph-mon[49535]: from='client.15201 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:25:40.320 INFO:teuthology.orchestra.run.vm00.stdout:{ 2026-03-10T13:25:40.320 
INFO:teuthology.orchestra.run.vm00.stdout: "target_image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", 2026-03-10T13:25:40.320 INFO:teuthology.orchestra.run.vm00.stdout: "in_progress": true, 2026-03-10T13:25:40.320 INFO:teuthology.orchestra.run.vm00.stdout: "which": "Upgrading daemons of type(s) mgr", 2026-03-10T13:25:40.320 INFO:teuthology.orchestra.run.vm00.stdout: "services_complete": [ 2026-03-10T13:25:40.320 INFO:teuthology.orchestra.run.vm00.stdout: "mgr" 2026-03-10T13:25:40.320 INFO:teuthology.orchestra.run.vm00.stdout: ], 2026-03-10T13:25:40.320 INFO:teuthology.orchestra.run.vm00.stdout: "progress": "2/2 daemons upgraded", 2026-03-10T13:25:40.320 INFO:teuthology.orchestra.run.vm00.stdout: "message": "Doing first pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df image", 2026-03-10T13:25:40.320 INFO:teuthology.orchestra.run.vm00.stdout: "is_paused": false 2026-03-10T13:25:40.320 INFO:teuthology.orchestra.run.vm00.stdout:} 2026-03-10T13:25:40.385 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:25:40 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: [10/Mar/2026:13:25:40] ENGINE Bus STOPPING 2026-03-10T13:25:40.669 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:25:40 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: [10/Mar/2026:13:25:40] ENGINE HTTP Server cherrypy._cpwsgi_server.CPWSGIServer(('::', 9283)) shut down 2026-03-10T13:25:40.669 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:25:40 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: [10/Mar/2026:13:25:40] ENGINE Bus STOPPED 2026-03-10T13:25:40.771 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:40 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: 2026-03-10T13:25:40.344+0000 7f231d56b640 -1 mgr handle_mgr_map I was active but no longer am 2026-03-10T13:25:40.771 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:40 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: ignoring --setuser ceph since I am not root 2026-03-10T13:25:40.771 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:40 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: ignoring --setgroup ceph since I am not root 2026-03-10T13:25:40.771 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:40 vm08 ceph-mgr[68040]: -- 192.168.123.108:0/2558479251 <== mon.2 v2:192.168.123.108:3300/0 4 ==== auth_reply(proto 2 0 (0) Success) ==== 194+0+0 (secure 0 0 0) 0x563af5ef74a0 con 0x563af5ed4800 2026-03-10T13:25:40.771 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:40 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: 2026-03-10T13:25:40.462+0000 7f8338500140 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member 2026-03-10T13:25:40.771 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:40 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: 2026-03-10T13:25:40.500+0000 7f8338500140 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member 2026-03-10T13:25:40.932 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:40 vm00 ceph-mon[47364]: from='client.? 
192.168.123.100:0/4240690185' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:25:40.932 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:40 vm00 ceph-mon[47364]: from='mgr.24824 ' entity='mgr.x' cmd='[{"prefix": "mgr fail", "who": "x"}]': finished 2026-03-10T13:25:40.932 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:40 vm00 ceph-mon[47364]: mgrmap e35: y(active, starting, since 1.00036s) 2026-03-10T13:25:40.932 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:40 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T13:25:40.932 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:40 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T13:25:40.932 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:40 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T13:25:40.932 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:40 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-10T13:25:40.932 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:40 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T13:25:40.932 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:40 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T13:25:40.932 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:40 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T13:25:40.932 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:40 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T13:25:40.932 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:40 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T13:25:40.932 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:40 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T13:25:40.932 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:40 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T13:25:40.932 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:40 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T13:25:40.932 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:40 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-10T13:25:40.932 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:40 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-10T13:25:40.932 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:40 vm00 ceph-mon[47364]: from='mgr.24955 
192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-10T13:25:40.932 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:40 vm00 ceph-mon[47364]: Manager daemon y is now available 2026-03-10T13:25:40.932 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:40 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:25:40.932 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:40 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T13:25:40.932 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:40 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T13:25:40.932 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:25:40 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: [10/Mar/2026:13:25:40] ENGINE Bus STARTING 2026-03-10T13:25:40.932 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:25:40 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: [10/Mar/2026:13:25:40] ENGINE Serving on http://:::9283 2026-03-10T13:25:40.932 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:25:40 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: [10/Mar/2026:13:25:40] ENGINE Bus STARTED 2026-03-10T13:25:40.933 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:40 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/4240690185' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:25:40.933 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:40 vm00 ceph-mon[51670]: from='mgr.24824 ' entity='mgr.x' cmd='[{"prefix": "mgr fail", "who": "x"}]': finished 2026-03-10T13:25:40.933 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:40 vm00 ceph-mon[51670]: mgrmap e35: y(active, starting, since 1.00036s) 2026-03-10T13:25:40.933 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:40 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T13:25:40.933 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:40 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T13:25:40.933 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:40 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T13:25:40.933 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:40 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-10T13:25:40.933 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:40 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T13:25:40.933 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:40 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T13:25:40.933 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:40 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": 
"osd metadata", "id": 2}]: dispatch 2026-03-10T13:25:40.933 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:40 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T13:25:40.933 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:40 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T13:25:40.933 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:40 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T13:25:40.933 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:40 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T13:25:40.933 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:40 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T13:25:40.933 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:40 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-10T13:25:40.933 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:40 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-10T13:25:40.933 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:40 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-10T13:25:40.933 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:40 vm00 ceph-mon[51670]: Manager daemon y is now available 2026-03-10T13:25:40.933 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:40 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:25:40.933 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:40 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T13:25:40.933 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:40 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T13:25:41.238 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:40 vm08 ceph-mon[49535]: from='client.? 
192.168.123.100:0/4240690185' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:25:41.238 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:40 vm08 ceph-mon[49535]: from='mgr.24824 ' entity='mgr.x' cmd='[{"prefix": "mgr fail", "who": "x"}]': finished 2026-03-10T13:25:41.238 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:40 vm08 ceph-mon[49535]: mgrmap e35: y(active, starting, since 1.00036s) 2026-03-10T13:25:41.238 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:40 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T13:25:41.238 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:40 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T13:25:41.238 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:40 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T13:25:41.238 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:40 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-10T13:25:41.238 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:40 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T13:25:41.238 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:40 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T13:25:41.238 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:40 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T13:25:41.238 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:40 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T13:25:41.238 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:40 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T13:25:41.238 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:40 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T13:25:41.238 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:40 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T13:25:41.238 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:40 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T13:25:41.238 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:40 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-10T13:25:41.238 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:40 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-10T13:25:41.238 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:40 vm08 ceph-mon[49535]: from='mgr.24955 
192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-10T13:25:41.238 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:40 vm08 ceph-mon[49535]: Manager daemon y is now available 2026-03-10T13:25:41.238 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:40 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:25:41.238 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:40 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T13:25:41.238 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:40 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T13:25:41.238 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:40 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: 2026-03-10T13:25:40.903+0000 7f8338500140 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-10T13:25:41.238 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:41 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: 2026-03-10T13:25:41.235+0000 7f8338500140 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member 2026-03-10T13:25:41.507 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:41 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: /lib64/python3.9/site-packages/scipy/__init__.py:73: UserWarning: NumPy was imported from a Python sub-interpreter but NumPy does not properly support sub-interpreters. This will likely work for most users but might cause hard to track down issues or subtle bugs. A common user of the rare sub-interpreter feature is wsgi which also allows single-interpreter mode. 2026-03-10T13:25:41.507 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:41 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: Improvements in the case of bugs are welcome, but is not on the NumPy roadmap, and full support may require significant effort to achieve. 
2026-03-10T13:25:41.507 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:41 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: from numpy import show_config as show_numpy_config 2026-03-10T13:25:41.507 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:41 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: 2026-03-10T13:25:41.324+0000 7f8338500140 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member 2026-03-10T13:25:41.507 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:41 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: 2026-03-10T13:25:41.368+0000 7f8338500140 -1 mgr[py] Module influx has missing NOTIFY_TYPES member 2026-03-10T13:25:41.508 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:41 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: 2026-03-10T13:25:41.451+0000 7f8338500140 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member 2026-03-10T13:25:41.508 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:25:41 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[77222]: ts=2026-03-10T13:25:41.386Z caller=refresh.go:90 level=error component="discovery manager scrape" discovery=http config=ceph-exporter msg="Unable to refresh target groups" err="Get \"http://192.168.123.108:8765/sd/prometheus/sd-config?service=ceph-exporter\": dial tcp 192.168.123.108:8765: connect: connection refused" 2026-03-10T13:25:41.508 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:25:41 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[77222]: ts=2026-03-10T13:25:41.387Z caller=refresh.go:90 level=error component="discovery manager scrape" discovery=http config=ceph msg="Unable to refresh target groups" err="Get \"http://192.168.123.108:8765/sd/prometheus/sd-config?service=mgr-prometheus\": dial tcp 192.168.123.108:8765: connect: connection refused" 2026-03-10T13:25:41.508 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:25:41 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[77222]: ts=2026-03-10T13:25:41.387Z caller=refresh.go:90 level=error component="discovery manager scrape" discovery=http config=nvmeof msg="Unable to refresh target groups" err="Get \"http://192.168.123.108:8765/sd/prometheus/sd-config?service=nvmeof\": dial tcp 192.168.123.108:8765: connect: connection refused" 2026-03-10T13:25:41.508 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:25:41 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[77222]: ts=2026-03-10T13:25:41.387Z caller=refresh.go:90 level=error component="discovery manager scrape" discovery=http config=node msg="Unable to refresh target groups" err="Get \"http://192.168.123.108:8765/sd/prometheus/sd-config?service=node-exporter\": dial tcp 192.168.123.108:8765: connect: connection refused" 2026-03-10T13:25:41.508 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:25:41 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[77222]: ts=2026-03-10T13:25:41.387Z caller=refresh.go:90 level=error component="discovery manager scrape" discovery=http config=nfs msg="Unable to refresh target groups" err="Get \"http://192.168.123.108:8765/sd/prometheus/sd-config?service=nfs\": dial tcp 192.168.123.108:8765: connect: connection refused" 2026-03-10T13:25:41.508 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:25:41 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[77222]: ts=2026-03-10T13:25:41.387Z caller=refresh.go:90 level=error component="discovery manager notify" discovery=http config=config-0 msg="Unable to refresh target 
groups" err="Get \"http://192.168.123.108:8765/sd/prometheus/sd-config?service=alertmanager\": dial tcp 192.168.123.108:8765: connect: connection refused" 2026-03-10T13:25:42.236 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:41 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: 2026-03-10T13:25:41.980+0000 7f8338500140 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-10T13:25:42.237 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:42 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: 2026-03-10T13:25:42.086+0000 7f8338500140 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-10T13:25:42.237 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:42 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: 2026-03-10T13:25:42.123+0000 7f8338500140 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 2026-03-10T13:25:42.237 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:42 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: 2026-03-10T13:25:42.155+0000 7f8338500140 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-10T13:25:42.237 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:42 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: 2026-03-10T13:25:42.193+0000 7f8338500140 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 2026-03-10T13:25:42.510 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:42 vm08 ceph-mon[49535]: mgrmap e36: y(active, since 2s) 2026-03-10T13:25:42.510 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:42 vm08 ceph-mon[49535]: [10/Mar/2026:13:25:41] ENGINE Bus STARTING 2026-03-10T13:25:42.510 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:42 vm08 ceph-mon[49535]: [10/Mar/2026:13:25:41] ENGINE Serving on https://192.168.123.100:7150 2026-03-10T13:25:42.510 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:42 vm08 ceph-mon[49535]: [10/Mar/2026:13:25:41] ENGINE Client ('192.168.123.100', 33952) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-10T13:25:42.510 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:42 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: 2026-03-10T13:25:42.231+0000 7f8338500140 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-10T13:25:42.510 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:42 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: 2026-03-10T13:25:42.452+0000 7f8338500140 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member 2026-03-10T13:25:42.510 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:42 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: 2026-03-10T13:25:42.508+0000 7f8338500140 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-10T13:25:42.624 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:42 vm00 ceph-mon[47364]: mgrmap e36: y(active, since 2s) 2026-03-10T13:25:42.624 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:42 vm00 ceph-mon[47364]: [10/Mar/2026:13:25:41] ENGINE Bus STARTING 2026-03-10T13:25:42.624 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:42 vm00 ceph-mon[47364]: [10/Mar/2026:13:25:41] ENGINE Serving on https://192.168.123.100:7150 2026-03-10T13:25:42.624 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:42 vm00 ceph-mon[47364]: [10/Mar/2026:13:25:41] ENGINE Client ('192.168.123.100', 33952) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has 
been closed (EOF) (_ssl.c:1147)') 2026-03-10T13:25:42.625 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:42 vm00 ceph-mon[51670]: mgrmap e36: y(active, since 2s) 2026-03-10T13:25:42.625 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:42 vm00 ceph-mon[51670]: [10/Mar/2026:13:25:41] ENGINE Bus STARTING 2026-03-10T13:25:42.625 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:42 vm00 ceph-mon[51670]: [10/Mar/2026:13:25:41] ENGINE Serving on https://192.168.123.100:7150 2026-03-10T13:25:42.625 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:42 vm00 ceph-mon[51670]: [10/Mar/2026:13:25:41] ENGINE Client ('192.168.123.100', 33952) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-10T13:25:42.768 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:42 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: 2026-03-10T13:25:42.766+0000 7f8338500140 -1 mgr[py] Module rgw has missing NOTIFY_TYPES member 2026-03-10T13:25:43.355 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:43 vm08 ceph-mon[49535]: [10/Mar/2026:13:25:42] ENGINE Serving on http://192.168.123.100:8765 2026-03-10T13:25:43.355 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:43 vm08 ceph-mon[49535]: [10/Mar/2026:13:25:42] ENGINE Bus STARTED 2026-03-10T13:25:43.355 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:43 vm08 ceph-mon[49535]: pgmap v4: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:25:43.355 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:43 vm08 ceph-mon[49535]: mgrmap e37: y(active, since 3s) 2026-03-10T13:25:43.355 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:43 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:43.355 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:43 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:43.355 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:43 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:43.355 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:43 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:43.355 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:43 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:43.355 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:43 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:43.355 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:43 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm08", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:25:43.356 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:43 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:43.356 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:43 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:43.356 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:43 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:25:43.356 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:43 vm08 ceph-mon[49535]: 
from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:25:43.356 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:43 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:25:43.356 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:43 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: 2026-03-10T13:25:43.061+0000 7f8338500140 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-10T13:25:43.356 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:43 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: 2026-03-10T13:25:43.099+0000 7f8338500140 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member 2026-03-10T13:25:43.356 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:43 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: 2026-03-10T13:25:43.143+0000 7f8338500140 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-10T13:25:43.356 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:43 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: 2026-03-10T13:25:43.227+0000 7f8338500140 -1 mgr[py] Module status has missing NOTIFY_TYPES member 2026-03-10T13:25:43.356 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:43 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: 2026-03-10T13:25:43.267+0000 7f8338500140 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 2026-03-10T13:25:43.356 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:43 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: 2026-03-10T13:25:43.353+0000 7f8338500140 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member 2026-03-10T13:25:43.618 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:43 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: 2026-03-10T13:25:43.476+0000 7f8338500140 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member 2026-03-10T13:25:43.618 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:43 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: 2026-03-10T13:25:43.616+0000 7f8338500140 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-10T13:25:43.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:43 vm00 ceph-mon[47364]: [10/Mar/2026:13:25:42] ENGINE Serving on http://192.168.123.100:8765 2026-03-10T13:25:43.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:43 vm00 ceph-mon[47364]: [10/Mar/2026:13:25:42] ENGINE Bus STARTED 2026-03-10T13:25:43.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:43 vm00 ceph-mon[47364]: pgmap v4: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:25:43.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:43 vm00 ceph-mon[47364]: mgrmap e37: y(active, since 3s) 2026-03-10T13:25:43.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:43 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:43.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:43 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:43.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:43 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:43.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:43 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' 
entity='mgr.y' 2026-03-10T13:25:43.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:43 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:43.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:43 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:43.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:43 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm08", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:25:43.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:43 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:43.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:43 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:43.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:43 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:25:43.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:43 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:25:43.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:43 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:25:43.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:43 vm00 ceph-mon[51670]: [10/Mar/2026:13:25:42] ENGINE Serving on http://192.168.123.100:8765 2026-03-10T13:25:43.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:43 vm00 ceph-mon[51670]: [10/Mar/2026:13:25:42] ENGINE Bus STARTED 2026-03-10T13:25:43.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:43 vm00 ceph-mon[51670]: pgmap v4: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:25:43.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:43 vm00 ceph-mon[51670]: mgrmap e37: y(active, since 3s) 2026-03-10T13:25:43.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:43 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:43.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:43 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:43.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:43 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:43.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:43 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:43.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:43 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:43.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:43 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:43.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:43 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm08", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:25:43.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:43 vm00 
ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:43.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:43 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:43.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:43 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:25:43.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:43 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:25:43.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:43 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:25:44.020 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:43 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: 2026-03-10T13:25:43.656+0000 7f8338500140 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-10T13:25:44.020 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:43 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: [10/Mar/2026:13:25:43] ENGINE Bus STARTING 2026-03-10T13:25:44.020 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:43 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: CherryPy Checker: 2026-03-10T13:25:44.020 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:43 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: The Application mounted at '' has an empty config. 2026-03-10T13:25:44.020 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:43 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: 2026-03-10T13:25:44.021 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:43 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: [10/Mar/2026:13:25:43] ENGINE Serving on http://:::9283 2026-03-10T13:25:44.021 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:43 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[68036]: [10/Mar/2026:13:25:43] ENGINE Bus STARTED 2026-03-10T13:25:44.504 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:25:44 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:25:44.279+0000 7ff18ce36640 -1 log_channel(cephadm) log [ERR] : cephadm exited with an error code: 1, stderr: Reconfig daemon iscsi.foo.vm00.dezodo ... 2026-03-10T13:25:44.504 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:25:44 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: Creating ceph-iscsi config... 
2026-03-10T13:25:44.504 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:25:44 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: Write file: /var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/iscsi.foo.vm00.dezodo/iscsi-gateway.cfg 2026-03-10T13:25:44.504 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:25:44 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: Write file: /var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/iscsi.foo.vm00.dezodo/tcmu-runner-entrypoint.sh 2026-03-10T13:25:44.504 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:25:44 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: Non-zero exit code 1 from systemctl restart ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo 2026-03-10T13:25:44.504 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:25:44 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: systemctl: stderr Job for ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service failed because the control process exited with error code. 2026-03-10T13:25:44.504 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:25:44 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: systemctl: stderr See "systemctl status ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service" and "journalctl -xeu ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service" for details. 2026-03-10T13:25:44.504 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:25:44 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: Traceback (most recent call last): 2026-03-10T13:25:44.504 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:25:44 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main 2026-03-10T13:25:44.504 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:25:44 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: return _run_code(code, main_globals, None, 2026-03-10T13:25:44.504 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:25:44 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code 2026-03-10T13:25:44.504 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:25:44 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: exec(code, run_globals) 2026-03-10T13:25:44.504 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:25:44 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in 2026-03-10T13:25:44.504 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:25:44 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main 2026-03-10T13:25:44.505 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:25:44 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3062, in command_deploy_from 2026-03-10T13:25:44.505 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:25:44 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3097, in _common_deploy 2026-03-10T13:25:44.505 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:25:44 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3117, in _deploy_daemon_container 2026-03-10T13:25:44.505 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:25:44 vm00 
ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 1126, in deploy_daemon 2026-03-10T13:25:44.505 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:25:44 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 307, in call_throws 2026-03-10T13:25:44.505 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:25:44 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: RuntimeError: Failed command: systemctl restart ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo: Job for ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service failed because the control process exited with error code. 2026-03-10T13:25:44.505 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:25:44 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: See "systemctl status ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service" and "journalctl -xeu ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service" for details. 2026-03-10T13:25:44.505 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:25:44 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: Traceback (most recent call last): 2026-03-10T13:25:44.505 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:25:44 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1145, in _check_daemons 2026-03-10T13:25:44.505 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:25:44 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: self.mgr._daemon_action(daemon_spec, action=action) 2026-03-10T13:25:44.505 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:25:44 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: File "/usr/share/ceph/mgr/cephadm/module.py", line 2545, in _daemon_action 2026-03-10T13:25:44.505 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:25:44 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: return self.wait_async( 2026-03-10T13:25:44.505 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:25:44 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: File "/usr/share/ceph/mgr/cephadm/module.py", line 815, in wait_async 2026-03-10T13:25:44.505 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:25:44 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: return self.event_loop.get_result(coro, timeout) 2026-03-10T13:25:44.505 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:25:44 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: File "/usr/share/ceph/mgr/cephadm/ssh.py", line 136, in get_result 2026-03-10T13:25:44.505 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:25:44 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: return future.result(timeout) 2026-03-10T13:25:44.505 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:25:44 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: File "/lib64/python3.9/concurrent/futures/_base.py", line 446, in result 2026-03-10T13:25:44.505 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:25:44 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: return self.__get_result() 2026-03-10T13:25:44.505 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:25:44 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: File "/lib64/python3.9/concurrent/futures/_base.py", line 391, in __get_result 2026-03-10T13:25:44.505 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:25:44 vm00 
ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: raise self._exception 2026-03-10T13:25:44.505 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:25:44 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1381, in _create_daemon 2026-03-10T13:25:44.505 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:25:44 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: out, err, code = await self._run_cephadm( 2026-03-10T13:25:44.505 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:25:44 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1724, in _run_cephadm 2026-03-10T13:25:44.505 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:25:44 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: raise OrchestratorError( 2026-03-10T13:25:44.505 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:25:44 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: orchestrator._interface.OrchestratorError: cephadm exited with an error code: 1, stderr: Reconfig daemon iscsi.foo.vm00.dezodo ... 2026-03-10T13:25:44.505 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:25:44 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: Creating ceph-iscsi config... 2026-03-10T13:25:44.505 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:25:44 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: Write file: /var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/iscsi.foo.vm00.dezodo/iscsi-gateway.cfg 2026-03-10T13:25:44.505 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:25:44 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: Write file: /var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/iscsi.foo.vm00.dezodo/tcmu-runner-entrypoint.sh 2026-03-10T13:25:44.505 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:25:44 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: Non-zero exit code 1 from systemctl restart ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo 2026-03-10T13:25:44.505 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:25:44 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: systemctl: stderr Job for ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service failed because the control process exited with error code. 2026-03-10T13:25:44.505 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:25:44 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: systemctl: stderr See "systemctl status ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service" and "journalctl -xeu ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service" for details. 
2026-03-10T13:25:44.505 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:25:44 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: Traceback (most recent call last): 2026-03-10T13:25:44.505 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:25:44 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main 2026-03-10T13:25:44.505 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:25:44 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: return _run_code(code, main_globals, None, 2026-03-10T13:25:44.505 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:25:44 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code 2026-03-10T13:25:44.505 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:25:44 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: exec(code, run_globals) 2026-03-10T13:25:44.505 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:25:44 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in 2026-03-10T13:25:44.505 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:25:44 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main 2026-03-10T13:25:44.505 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:25:44 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3062, in command_deploy_from 2026-03-10T13:25:44.505 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:25:44 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3097, in _common_deploy 2026-03-10T13:25:44.505 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:25:44 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3117, in _deploy_daemon_container 2026-03-10T13:25:44.505 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:25:44 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 1126, in deploy_daemon 2026-03-10T13:25:44.505 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:25:44 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 307, in call_throws 2026-03-10T13:25:44.505 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:25:44 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: RuntimeError: Failed command: systemctl restart ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo: Job for ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service failed because the control process exited with error code. 2026-03-10T13:25:44.505 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:25:44 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: See "systemctl status ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service" and "journalctl -xeu ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service" for details. 2026-03-10T13:25:44.850 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:25:44 vm08 systemd[1]: Stopping Ceph prometheus.a for 98a3dada-1c81-11f1-89c9-d57c120f78d5... 
2026-03-10T13:25:44.850 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:44 vm08 ceph-mon[49535]: Updating vm00:/etc/ceph/ceph.conf 2026-03-10T13:25:44.850 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:44 vm08 ceph-mon[49535]: Updating vm08:/etc/ceph/ceph.conf 2026-03-10T13:25:44.850 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:44 vm08 ceph-mon[49535]: Updating vm00:/var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/config/ceph.conf 2026-03-10T13:25:44.850 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:44 vm08 ceph-mon[49535]: Updating vm08:/var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/config/ceph.conf 2026-03-10T13:25:44.850 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:44 vm08 ceph-mon[49535]: Updating vm00:/etc/ceph/ceph.client.admin.keyring 2026-03-10T13:25:44.850 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:44 vm08 ceph-mon[49535]: Updating vm08:/etc/ceph/ceph.client.admin.keyring 2026-03-10T13:25:44.850 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:44 vm08 ceph-mon[49535]: Updating vm00:/var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/config/ceph.client.admin.keyring 2026-03-10T13:25:44.850 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:44 vm08 ceph-mon[49535]: Updating vm08:/var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/config/ceph.client.admin.keyring 2026-03-10T13:25:44.850 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:44 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:44.850 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:44 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:44.850 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:44 vm08 ceph-mon[49535]: Standby manager daemon x started 2026-03-10T13:25:44.850 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:44 vm08 ceph-mon[49535]: from='mgr.? 192.168.123.108:0/1104550319' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-10T13:25:44.850 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:44 vm08 ceph-mon[49535]: from='mgr.? 192.168.123.108:0/1104550319' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-10T13:25:44.850 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:44 vm08 ceph-mon[49535]: from='mgr.? 192.168.123.108:0/1104550319' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-10T13:25:44.850 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:44 vm08 ceph-mon[49535]: from='mgr.? 192.168.123.108:0/1104550319' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-10T13:25:44.850 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:44 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:44.850 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:44 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:44.850 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:44 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:44.850 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:44 vm08 ceph-mon[49535]: Reconfiguring iscsi.foo.vm00.dezodo (dependencies changed)... 
2026-03-10T13:25:44.850 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:44 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dezodo", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-10T13:25:44.850 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:44 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:25:44.850 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:44 vm08 ceph-mon[49535]: Reconfiguring daemon iscsi.foo.vm00.dezodo on vm00 2026-03-10T13:25:45.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:44 vm00 ceph-mon[47364]: Updating vm00:/etc/ceph/ceph.conf 2026-03-10T13:25:45.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:44 vm00 ceph-mon[47364]: Updating vm08:/etc/ceph/ceph.conf 2026-03-10T13:25:45.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:44 vm00 ceph-mon[47364]: Updating vm00:/var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/config/ceph.conf 2026-03-10T13:25:45.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:44 vm00 ceph-mon[47364]: Updating vm08:/var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/config/ceph.conf 2026-03-10T13:25:45.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:44 vm00 ceph-mon[47364]: Updating vm00:/etc/ceph/ceph.client.admin.keyring 2026-03-10T13:25:45.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:44 vm00 ceph-mon[47364]: Updating vm08:/etc/ceph/ceph.client.admin.keyring 2026-03-10T13:25:45.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:44 vm00 ceph-mon[47364]: Updating vm00:/var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/config/ceph.client.admin.keyring 2026-03-10T13:25:45.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:44 vm00 ceph-mon[47364]: Updating vm08:/var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/config/ceph.client.admin.keyring 2026-03-10T13:25:45.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:44 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:45.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:44 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:45.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:44 vm00 ceph-mon[47364]: Standby manager daemon x started 2026-03-10T13:25:45.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:44 vm00 ceph-mon[47364]: from='mgr.? 192.168.123.108:0/1104550319' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-10T13:25:45.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:44 vm00 ceph-mon[47364]: from='mgr.? 192.168.123.108:0/1104550319' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-10T13:25:45.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:44 vm00 ceph-mon[47364]: from='mgr.? 192.168.123.108:0/1104550319' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-10T13:25:45.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:44 vm00 ceph-mon[47364]: from='mgr.? 
192.168.123.108:0/1104550319' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-10T13:25:45.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:44 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:45.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:44 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:45.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:44 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:45.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:44 vm00 ceph-mon[47364]: Reconfiguring iscsi.foo.vm00.dezodo (dependencies changed)... 2026-03-10T13:25:45.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:44 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dezodo", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-10T13:25:45.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:44 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:25:45.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:44 vm00 ceph-mon[47364]: Reconfiguring daemon iscsi.foo.vm00.dezodo on vm00 2026-03-10T13:25:45.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:44 vm00 ceph-mon[51670]: Updating vm00:/etc/ceph/ceph.conf 2026-03-10T13:25:45.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:44 vm00 ceph-mon[51670]: Updating vm08:/etc/ceph/ceph.conf 2026-03-10T13:25:45.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:44 vm00 ceph-mon[51670]: Updating vm00:/var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/config/ceph.conf 2026-03-10T13:25:45.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:44 vm00 ceph-mon[51670]: Updating vm08:/var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/config/ceph.conf 2026-03-10T13:25:45.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:44 vm00 ceph-mon[51670]: Updating vm00:/etc/ceph/ceph.client.admin.keyring 2026-03-10T13:25:45.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:44 vm00 ceph-mon[51670]: Updating vm08:/etc/ceph/ceph.client.admin.keyring 2026-03-10T13:25:45.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:44 vm00 ceph-mon[51670]: Updating vm00:/var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/config/ceph.client.admin.keyring 2026-03-10T13:25:45.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:44 vm00 ceph-mon[51670]: Updating vm08:/var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/config/ceph.client.admin.keyring 2026-03-10T13:25:45.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:44 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:45.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:44 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:45.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:44 vm00 ceph-mon[51670]: Standby manager daemon x started 2026-03-10T13:25:45.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:44 vm00 ceph-mon[51670]: from='mgr.? 
192.168.123.108:0/1104550319' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-10T13:25:45.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:44 vm00 ceph-mon[51670]: from='mgr.? 192.168.123.108:0/1104550319' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-10T13:25:45.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:44 vm00 ceph-mon[51670]: from='mgr.? 192.168.123.108:0/1104550319' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-10T13:25:45.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:44 vm00 ceph-mon[51670]: from='mgr.? 192.168.123.108:0/1104550319' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-10T13:25:45.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:44 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:45.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:44 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:45.004 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:44 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:45.004 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:44 vm00 ceph-mon[51670]: Reconfiguring iscsi.foo.vm00.dezodo (dependencies changed)... 2026-03-10T13:25:45.004 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:44 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dezodo", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-10T13:25:45.004 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:44 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:25:45.004 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:44 vm00 ceph-mon[51670]: Reconfiguring daemon iscsi.foo.vm00.dezodo on vm00 2026-03-10T13:25:45.101 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:25:44 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[77222]: ts=2026-03-10T13:25:44.847Z caller=main.go:964 level=warn msg="Received SIGTERM, exiting gracefully..." 2026-03-10T13:25:45.101 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:25:44 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[77222]: ts=2026-03-10T13:25:44.847Z caller=main.go:988 level=info msg="Stopping scrape discovery manager..." 2026-03-10T13:25:45.101 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:25:44 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[77222]: ts=2026-03-10T13:25:44.847Z caller=main.go:1002 level=info msg="Stopping notify discovery manager..." 2026-03-10T13:25:45.102 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:25:44 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[77222]: ts=2026-03-10T13:25:44.847Z caller=manager.go:177 level=info component="rule manager" msg="Stopping rule manager..." 
2026-03-10T13:25:45.102 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:25:44 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[77222]: ts=2026-03-10T13:25:44.847Z caller=main.go:984 level=info msg="Scrape discovery manager stopped" 2026-03-10T13:25:45.102 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:25:44 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[77222]: ts=2026-03-10T13:25:44.847Z caller=main.go:998 level=info msg="Notify discovery manager stopped" 2026-03-10T13:25:45.102 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:25:44 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[77222]: ts=2026-03-10T13:25:44.847Z caller=manager.go:187 level=info component="rule manager" msg="Rule manager stopped" 2026-03-10T13:25:45.102 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:25:44 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[77222]: ts=2026-03-10T13:25:44.847Z caller=main.go:1039 level=info msg="Stopping scrape manager..." 2026-03-10T13:25:45.102 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:25:44 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[77222]: ts=2026-03-10T13:25:44.848Z caller=main.go:1031 level=info msg="Scrape manager stopped" 2026-03-10T13:25:45.102 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:25:44 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[77222]: ts=2026-03-10T13:25:44.852Z caller=notifier.go:618 level=info component=notifier msg="Stopping notification manager..." 2026-03-10T13:25:45.102 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:25:44 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[77222]: ts=2026-03-10T13:25:44.852Z caller=main.go:1261 level=info msg="Notifier manager stopped" 2026-03-10T13:25:45.102 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:25:44 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[77222]: ts=2026-03-10T13:25:44.852Z caller=main.go:1273 level=info msg="See you next time!" 2026-03-10T13:25:45.102 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:25:44 vm08 podman[79259]: 2026-03-10 13:25:44.858246617 +0000 UTC m=+0.022689976 container died e1b806e63eed211791fd17781451bf1fa807254309561224c93f358a9780a180 (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-10T13:25:45.102 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:25:44 vm08 podman[79259]: 2026-03-10 13:25:44.874299818 +0000 UTC m=+0.038743177 container remove e1b806e63eed211791fd17781451bf1fa807254309561224c93f358a9780a180 (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-10T13:25:45.102 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:25:44 vm08 bash[79259]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a 2026-03-10T13:25:45.102 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:25:44 vm08 systemd[1]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@prometheus.a.service: Deactivated successfully. 2026-03-10T13:25:45.102 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:25:44 vm08 systemd[1]: Stopped Ceph prometheus.a for 98a3dada-1c81-11f1-89c9-d57c120f78d5. 2026-03-10T13:25:45.102 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:25:44 vm08 systemd[1]: Starting Ceph prometheus.a for 98a3dada-1c81-11f1-89c9-d57c120f78d5... 
2026-03-10T13:25:45.102 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:25:45 vm08 podman[79328]: 2026-03-10 13:25:45.019912567 +0000 UTC m=+0.020215613 container create 3f9b2d0821c90b9fc8bbd802fbe04f0ae98fd04a9d094ba8bb73267e3eb852ae (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-10T13:25:45.102 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:25:45 vm08 podman[79328]: 2026-03-10 13:25:45.041225184 +0000 UTC m=+0.041528230 container init 3f9b2d0821c90b9fc8bbd802fbe04f0ae98fd04a9d094ba8bb73267e3eb852ae (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-10T13:25:45.102 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:25:45 vm08 podman[79328]: 2026-03-10 13:25:45.043713572 +0000 UTC m=+0.044016618 container start 3f9b2d0821c90b9fc8bbd802fbe04f0ae98fd04a9d094ba8bb73267e3eb852ae (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-10T13:25:45.102 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:25:45 vm08 bash[79328]: 3f9b2d0821c90b9fc8bbd802fbe04f0ae98fd04a9d094ba8bb73267e3eb852ae 2026-03-10T13:25:45.102 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:25:45 vm08 podman[79328]: 2026-03-10 13:25:45.009495226 +0000 UTC m=+0.009798272 image pull 1d3b7f56885b6dd623f1785be963aa9c195f86bc256ea454e8d02a7980b79c53 quay.io/prometheus/prometheus:v2.51.0 2026-03-10T13:25:45.102 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:25:45 vm08 systemd[1]: Started Ceph prometheus.a for 98a3dada-1c81-11f1-89c9-d57c120f78d5. 2026-03-10T13:25:45.102 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:25:45 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[79338]: ts=2026-03-10T13:25:45.073Z caller=main.go:617 level=info msg="Starting Prometheus Server" mode=server version="(version=2.51.0, branch=HEAD, revision=c05c15512acb675e3f6cd662a6727854e93fc024)" 2026-03-10T13:25:45.102 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:25:45 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[79338]: ts=2026-03-10T13:25:45.073Z caller=main.go:622 level=info build_context="(go=go1.22.1, platform=linux/amd64, user=root@b5723e458358, date=20240319-10:54:45, tags=netgo,builtinassets,stringlabels)" 2026-03-10T13:25:45.102 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:25:45 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[79338]: ts=2026-03-10T13:25:45.073Z caller=main.go:623 level=info host_details="(Linux 5.14.0-686.el9.x86_64 #1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026 x86_64 vm08 (none))" 2026-03-10T13:25:45.102 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:25:45 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[79338]: ts=2026-03-10T13:25:45.073Z caller=main.go:624 level=info fd_limits="(soft=1048576, hard=1048576)" 2026-03-10T13:25:45.102 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:25:45 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[79338]: ts=2026-03-10T13:25:45.073Z caller=main.go:625 level=info vm_limits="(soft=unlimited, hard=unlimited)" 2026-03-10T13:25:45.102 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:25:45 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[79338]: ts=2026-03-10T13:25:45.078Z caller=web.go:568 level=info component=web msg="Start listening 
for connections" address=:9095 2026-03-10T13:25:45.102 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:25:45 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[79338]: ts=2026-03-10T13:25:45.079Z caller=main.go:1129 level=info msg="Starting TSDB ..." 2026-03-10T13:25:45.102 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:25:45 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[79338]: ts=2026-03-10T13:25:45.079Z caller=tls_config.go:313 level=info component=web msg="Listening on" address=[::]:9095 2026-03-10T13:25:45.102 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:25:45 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[79338]: ts=2026-03-10T13:25:45.079Z caller=tls_config.go:316 level=info component=web msg="TLS is disabled." http2=false address=[::]:9095 2026-03-10T13:25:45.102 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:25:45 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[79338]: ts=2026-03-10T13:25:45.081Z caller=head.go:616 level=info component=tsdb msg="Replaying on-disk memory mappable chunks if any" 2026-03-10T13:25:45.102 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:25:45 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[79338]: ts=2026-03-10T13:25:45.081Z caller=head.go:698 level=info component=tsdb msg="On-disk memory mappable chunks replay completed" duration=1.022µs 2026-03-10T13:25:45.102 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:25:45 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[79338]: ts=2026-03-10T13:25:45.081Z caller=head.go:706 level=info component=tsdb msg="Replaying WAL, this may take a while" 2026-03-10T13:25:45.102 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:25:45 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[79338]: ts=2026-03-10T13:25:45.086Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=0 maxSegment=5 2026-03-10T13:25:45.102 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:25:45 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[79338]: ts=2026-03-10T13:25:45.095Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=1 maxSegment=5 2026-03-10T13:25:45.370 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:25:45 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[79338]: ts=2026-03-10T13:25:45.100Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=2 maxSegment=5 2026-03-10T13:25:45.370 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:25:45 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[79338]: ts=2026-03-10T13:25:45.105Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=3 maxSegment=5 2026-03-10T13:25:45.370 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:25:45 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[79338]: ts=2026-03-10T13:25:45.107Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=4 maxSegment=5 2026-03-10T13:25:45.370 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:25:45 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[79338]: ts=2026-03-10T13:25:45.107Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=5 maxSegment=5 2026-03-10T13:25:45.370 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:25:45 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[79338]: ts=2026-03-10T13:25:45.107Z caller=head.go:815 level=info component=tsdb 
msg="WAL replay completed" checkpoint_replay_duration=98.975µs wal_replay_duration=25.846554ms wbl_replay_duration=120ns total_replay_duration=25.95668ms 2026-03-10T13:25:45.370 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:25:45 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[79338]: ts=2026-03-10T13:25:45.109Z caller=main.go:1150 level=info fs_type=XFS_SUPER_MAGIC 2026-03-10T13:25:45.370 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:25:45 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[79338]: ts=2026-03-10T13:25:45.109Z caller=main.go:1153 level=info msg="TSDB started" 2026-03-10T13:25:45.370 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:25:45 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[79338]: ts=2026-03-10T13:25:45.109Z caller=main.go:1335 level=info msg="Loading configuration file" filename=/etc/prometheus/prometheus.yml 2026-03-10T13:25:45.370 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:25:45 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[79338]: ts=2026-03-10T13:25:45.118Z caller=main.go:1372 level=info msg="Completed loading of configuration file" filename=/etc/prometheus/prometheus.yml totalDuration=8.633632ms db_storage=652ns remote_storage=821ns web_handler=501ns query_engine=751ns scrape=646.381µs scrape_sd=82.695µs notify=9.958µs notify_sd=5.059µs rules=7.536037ms tracing=2.996µs 2026-03-10T13:25:45.370 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:25:45 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[79338]: ts=2026-03-10T13:25:45.118Z caller=main.go:1114 level=info msg="Server is ready to receive web requests." 2026-03-10T13:25:45.370 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:25:45 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[79338]: ts=2026-03-10T13:25:45.118Z caller=manager.go:163 level=info component="rule manager" msg="Starting rule manager..." 2026-03-10T13:25:45.399 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:25:45 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: [10/Mar/2026:13:25:45] ENGINE Bus STOPPING 2026-03-10T13:25:45.661 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:25:45 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: [10/Mar/2026:13:25:45] ENGINE HTTP Server cherrypy._cpwsgi_server.CPWSGIServer(('::', 9283)) shut down 2026-03-10T13:25:45.661 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:25:45 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: [10/Mar/2026:13:25:45] ENGINE Bus STOPPED 2026-03-10T13:25:45.661 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:25:45 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: [10/Mar/2026:13:25:45] ENGINE Bus STARTING 2026-03-10T13:25:45.661 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:25:45 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: [10/Mar/2026:13:25:45] ENGINE Serving on http://:::9283 2026-03-10T13:25:45.661 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:25:45 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: [10/Mar/2026:13:25:45] ENGINE Bus STARTED 2026-03-10T13:25:45.921 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:45 vm08 ceph-mon[49535]: cephadm exited with an error code: 1, stderr: Reconfig daemon iscsi.foo.vm00.dezodo ... 2026-03-10T13:25:45.921 INFO:journalctl@ceph.mon.b.vm08.stdout: Creating ceph-iscsi config... 
2026-03-10T13:25:45.921 INFO:journalctl@ceph.mon.b.vm08.stdout: Write file: /var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/iscsi.foo.vm00.dezodo/iscsi-gateway.cfg 2026-03-10T13:25:45.921 INFO:journalctl@ceph.mon.b.vm08.stdout: Write file: /var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/iscsi.foo.vm00.dezodo/tcmu-runner-entrypoint.sh 2026-03-10T13:25:45.922 INFO:journalctl@ceph.mon.b.vm08.stdout: Non-zero exit code 1 from systemctl restart ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo 2026-03-10T13:25:45.922 INFO:journalctl@ceph.mon.b.vm08.stdout: systemctl: stderr Job for ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service failed because the control process exited with error code. 2026-03-10T13:25:45.922 INFO:journalctl@ceph.mon.b.vm08.stdout: systemctl: stderr See "systemctl status ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service" and "journalctl -xeu ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service" for details. 2026-03-10T13:25:45.922 INFO:journalctl@ceph.mon.b.vm08.stdout: Traceback (most recent call last): 2026-03-10T13:25:45.922 INFO:journalctl@ceph.mon.b.vm08.stdout: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main 2026-03-10T13:25:45.922 INFO:journalctl@ceph.mon.b.vm08.stdout: return _run_code(code, main_globals, None, 2026-03-10T13:25:45.922 INFO:journalctl@ceph.mon.b.vm08.stdout: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code 2026-03-10T13:25:45.922 INFO:journalctl@ceph.mon.b.vm08.stdout: exec(code, run_globals) 2026-03-10T13:25:45.922 INFO:journalctl@ceph.mon.b.vm08.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in 2026-03-10T13:25:45.922 INFO:journalctl@ceph.mon.b.vm08.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main 2026-03-10T13:25:45.922 INFO:journalctl@ceph.mon.b.vm08.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3062, in command_deploy_from 2026-03-10T13:25:45.922 INFO:journalctl@ceph.mon.b.vm08.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3097, in _common_deploy 2026-03-10T13:25:45.922 INFO:journalctl@ceph.mon.b.vm08.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3117, in _deploy_daemon_container 2026-03-10T13:25:45.922 INFO:journalctl@ceph.mon.b.vm08.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 1126, in deploy_daemon 2026-03-10T13:25:45.922 INFO:journalctl@ceph.mon.b.vm08.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 307, in call_throws 2026-03-10T13:25:45.922 INFO:journalctl@ceph.mon.b.vm08.stdout: RuntimeError: Failed command: systemctl restart ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo: Job for ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service failed because the control process exited with error code. 2026-03-10T13:25:45.922 INFO:journalctl@ceph.mon.b.vm08.stdout: See "systemctl status ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service" and "journalctl -xeu ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service" for details. 
2026-03-10T13:25:45.922 INFO:journalctl@ceph.mon.b.vm08.stdout: Traceback (most recent call last): 2026-03-10T13:25:45.922 INFO:journalctl@ceph.mon.b.vm08.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1145, in _check_daemons 2026-03-10T13:25:45.922 INFO:journalctl@ceph.mon.b.vm08.stdout: self.mgr._daemon_action(daemon_spec, action=action) 2026-03-10T13:25:45.922 INFO:journalctl@ceph.mon.b.vm08.stdout: File "/usr/share/ceph/mgr/cephadm/module.py", line 2545, in _daemon_action 2026-03-10T13:25:45.922 INFO:journalctl@ceph.mon.b.vm08.stdout: return self.wait_async( 2026-03-10T13:25:45.922 INFO:journalctl@ceph.mon.b.vm08.stdout: File "/usr/share/ceph/mgr/cephadm/module.py", line 815, in wait_async 2026-03-10T13:25:45.922 INFO:journalctl@ceph.mon.b.vm08.stdout: return self.event_loop.get_result(coro, timeout) 2026-03-10T13:25:45.922 INFO:journalctl@ceph.mon.b.vm08.stdout: File "/usr/share/ceph/mgr/cephadm/ssh.py", line 136, in get_result 2026-03-10T13:25:45.922 INFO:journalctl@ceph.mon.b.vm08.stdout: return future.result(timeout) 2026-03-10T13:25:45.922 INFO:journalctl@ceph.mon.b.vm08.stdout: File "/lib64/python3.9/concurrent/futures/_base.py", line 446, in result 2026-03-10T13:25:45.922 INFO:journalctl@ceph.mon.b.vm08.stdout: return self.__get_result() 2026-03-10T13:25:45.922 INFO:journalctl@ceph.mon.b.vm08.stdout: File "/lib64/python3.9/concurrent/futures/_base.py", line 391, in __get_result 2026-03-10T13:25:45.922 INFO:journalctl@ceph.mon.b.vm08.stdout: raise self._exception 2026-03-10T13:25:45.922 INFO:journalctl@ceph.mon.b.vm08.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1381, in _create_daemon 2026-03-10T13:25:45.922 INFO:journalctl@ceph.mon.b.vm08.stdout: out, err, code = await self._run_cephadm( 2026-03-10T13:25:45.922 INFO:journalctl@ceph.mon.b.vm08.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1724, in _run_cephadm 2026-03-10T13:25:45.922 INFO:journalctl@ceph.mon.b.vm08.stdout: raise OrchestratorError( 2026-03-10T13:25:45.922 INFO:journalctl@ceph.mon.b.vm08.stdout: orchestrator._interface.OrchestratorError: cephadm exited with an error code: 1, stderr: Reconfig daemon iscsi.foo.vm00.dezodo ... 2026-03-10T13:25:45.922 INFO:journalctl@ceph.mon.b.vm08.stdout: Creating ceph-iscsi config... 2026-03-10T13:25:45.922 INFO:journalctl@ceph.mon.b.vm08.stdout: Write file: /var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/iscsi.foo.vm00.dezodo/iscsi-gateway.cfg 2026-03-10T13:25:45.922 INFO:journalctl@ceph.mon.b.vm08.stdout: Write file: /var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/iscsi.foo.vm00.dezodo/tcmu-runner-entrypoint.sh 2026-03-10T13:25:45.922 INFO:journalctl@ceph.mon.b.vm08.stdout: Non-zero exit code 1 from systemctl restart ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo 2026-03-10T13:25:45.922 INFO:journalctl@ceph.mon.b.vm08.stdout: systemctl: stderr Job for ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service failed because the control process exited with error code. 2026-03-10T13:25:45.922 INFO:journalctl@ceph.mon.b.vm08.stdout: systemctl: stderr See "systemctl status ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service" and "journalctl -xeu ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service" for details. 
2026-03-10T13:25:45.922 INFO:journalctl@ceph.mon.b.vm08.stdout: Traceback (most recent call last): 2026-03-10T13:25:45.922 INFO:journalctl@ceph.mon.b.vm08.stdout: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main 2026-03-10T13:25:45.922 INFO:journalctl@ceph.mon.b.vm08.stdout: return _run_code(code, main_globals, None, 2026-03-10T13:25:45.922 INFO:journalctl@ceph.mon.b.vm08.stdout: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code 2026-03-10T13:25:45.922 INFO:journalctl@ceph.mon.b.vm08.stdout: exec(code, run_globals) 2026-03-10T13:25:45.922 INFO:journalctl@ceph.mon.b.vm08.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in 2026-03-10T13:25:45.922 INFO:journalctl@ceph.mon.b.vm08.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main 2026-03-10T13:25:45.922 INFO:journalctl@ceph.mon.b.vm08.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3062, in command_deploy_from 2026-03-10T13:25:45.922 INFO:journalctl@ceph.mon.b.vm08.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3097, in _common_deploy 2026-03-10T13:25:45.922 INFO:journalctl@ceph.mon.b.vm08.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3117, in _deploy_daemon_container 2026-03-10T13:25:45.922 INFO:journalctl@ceph.mon.b.vm08.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 1126, in deploy_daemon 2026-03-10T13:25:45.922 INFO:journalctl@ceph.mon.b.vm08.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 307, in call_throws 2026-03-10T13:25:45.922 INFO:journalctl@ceph.mon.b.vm08.stdout: RuntimeError: Failed command: systemctl restart ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo: Job for ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service failed because the control process exited with error code. 2026-03-10T13:25:45.922 INFO:journalctl@ceph.mon.b.vm08.stdout: See "systemctl status ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service" and "journalctl -xeu ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service" for details. 2026-03-10T13:25:45.922 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:45 vm08 ceph-mon[49535]: Reconfiguring prometheus.a (dependencies changed)... 
2026-03-10T13:25:45.922 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:45 vm08 ceph-mon[49535]: pgmap v5: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:25:45.922 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:45 vm08 ceph-mon[49535]: Reconfiguring daemon prometheus.a on vm08 2026-03-10T13:25:45.922 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:45 vm08 ceph-mon[49535]: mgrmap e38: y(active, since 5s), standbys: x 2026-03-10T13:25:45.922 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:45 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-10T13:25:45.922 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:45 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:45.922 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:45 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:45.922 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:45 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-10T13:25:45.922 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:45 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm08.local:9095"}]: dispatch 2026-03-10T13:25:45.923 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:45 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:45.923 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:45 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:25:45.923 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:45 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-10T13:25:45.923 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:45 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-10T13:25:45.923 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:45 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:25:45.923 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:45 vm08 systemd[1]: Stopping Ceph mgr.x for 98a3dada-1c81-11f1-89c9-d57c120f78d5... 2026-03-10T13:25:46.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:45 vm00 ceph-mon[47364]: cephadm exited with an error code: 1, stderr: Reconfig daemon iscsi.foo.vm00.dezodo ... 2026-03-10T13:25:46.003 INFO:journalctl@ceph.mon.a.vm00.stdout: Creating ceph-iscsi config... 
2026-03-10T13:25:46.003 INFO:journalctl@ceph.mon.a.vm00.stdout: Write file: /var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/iscsi.foo.vm00.dezodo/iscsi-gateway.cfg 2026-03-10T13:25:46.003 INFO:journalctl@ceph.mon.a.vm00.stdout: Write file: /var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/iscsi.foo.vm00.dezodo/tcmu-runner-entrypoint.sh 2026-03-10T13:25:46.003 INFO:journalctl@ceph.mon.a.vm00.stdout: Non-zero exit code 1 from systemctl restart ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo 2026-03-10T13:25:46.003 INFO:journalctl@ceph.mon.a.vm00.stdout: systemctl: stderr Job for ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service failed because the control process exited with error code. 2026-03-10T13:25:46.003 INFO:journalctl@ceph.mon.a.vm00.stdout: systemctl: stderr See "systemctl status ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service" and "journalctl -xeu ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service" for details. 2026-03-10T13:25:46.003 INFO:journalctl@ceph.mon.a.vm00.stdout: Traceback (most recent call last): 2026-03-10T13:25:46.003 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main 2026-03-10T13:25:46.003 INFO:journalctl@ceph.mon.a.vm00.stdout: return _run_code(code, main_globals, None, 2026-03-10T13:25:46.003 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code 2026-03-10T13:25:46.003 INFO:journalctl@ceph.mon.a.vm00.stdout: exec(code, run_globals) 2026-03-10T13:25:46.003 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in 2026-03-10T13:25:46.003 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main 2026-03-10T13:25:46.003 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3062, in command_deploy_from 2026-03-10T13:25:46.003 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3097, in _common_deploy 2026-03-10T13:25:46.003 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3117, in _deploy_daemon_container 2026-03-10T13:25:46.003 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 1126, in deploy_daemon 2026-03-10T13:25:46.003 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 307, in call_throws 2026-03-10T13:25:46.003 INFO:journalctl@ceph.mon.a.vm00.stdout: RuntimeError: Failed command: systemctl restart ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo: Job for ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service failed because the control process exited with error code. 2026-03-10T13:25:46.003 INFO:journalctl@ceph.mon.a.vm00.stdout: See "systemctl status ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service" and "journalctl -xeu ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service" for details. 
2026-03-10T13:25:46.003 INFO:journalctl@ceph.mon.a.vm00.stdout: Traceback (most recent call last): 2026-03-10T13:25:46.003 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1145, in _check_daemons 2026-03-10T13:25:46.003 INFO:journalctl@ceph.mon.a.vm00.stdout: self.mgr._daemon_action(daemon_spec, action=action) 2026-03-10T13:25:46.003 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/usr/share/ceph/mgr/cephadm/module.py", line 2545, in _daemon_action 2026-03-10T13:25:46.003 INFO:journalctl@ceph.mon.a.vm00.stdout: return self.wait_async( 2026-03-10T13:25:46.003 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/usr/share/ceph/mgr/cephadm/module.py", line 815, in wait_async 2026-03-10T13:25:46.003 INFO:journalctl@ceph.mon.a.vm00.stdout: return self.event_loop.get_result(coro, timeout) 2026-03-10T13:25:46.003 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/usr/share/ceph/mgr/cephadm/ssh.py", line 136, in get_result 2026-03-10T13:25:46.003 INFO:journalctl@ceph.mon.a.vm00.stdout: return future.result(timeout) 2026-03-10T13:25:46.003 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/lib64/python3.9/concurrent/futures/_base.py", line 446, in result 2026-03-10T13:25:46.003 INFO:journalctl@ceph.mon.a.vm00.stdout: return self.__get_result() 2026-03-10T13:25:46.003 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/lib64/python3.9/concurrent/futures/_base.py", line 391, in __get_result 2026-03-10T13:25:46.003 INFO:journalctl@ceph.mon.a.vm00.stdout: raise self._exception 2026-03-10T13:25:46.003 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1381, in _create_daemon 2026-03-10T13:25:46.003 INFO:journalctl@ceph.mon.a.vm00.stdout: out, err, code = await self._run_cephadm( 2026-03-10T13:25:46.003 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1724, in _run_cephadm 2026-03-10T13:25:46.003 INFO:journalctl@ceph.mon.a.vm00.stdout: raise OrchestratorError( 2026-03-10T13:25:46.003 INFO:journalctl@ceph.mon.a.vm00.stdout: orchestrator._interface.OrchestratorError: cephadm exited with an error code: 1, stderr: Reconfig daemon iscsi.foo.vm00.dezodo ... 2026-03-10T13:25:46.003 INFO:journalctl@ceph.mon.a.vm00.stdout: Creating ceph-iscsi config... 2026-03-10T13:25:46.003 INFO:journalctl@ceph.mon.a.vm00.stdout: Write file: /var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/iscsi.foo.vm00.dezodo/iscsi-gateway.cfg 2026-03-10T13:25:46.003 INFO:journalctl@ceph.mon.a.vm00.stdout: Write file: /var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/iscsi.foo.vm00.dezodo/tcmu-runner-entrypoint.sh 2026-03-10T13:25:46.003 INFO:journalctl@ceph.mon.a.vm00.stdout: Non-zero exit code 1 from systemctl restart ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo 2026-03-10T13:25:46.003 INFO:journalctl@ceph.mon.a.vm00.stdout: systemctl: stderr Job for ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service failed because the control process exited with error code. 2026-03-10T13:25:46.003 INFO:journalctl@ceph.mon.a.vm00.stdout: systemctl: stderr See "systemctl status ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service" and "journalctl -xeu ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service" for details. 
2026-03-10T13:25:46.003 INFO:journalctl@ceph.mon.a.vm00.stdout: Traceback (most recent call last): 2026-03-10T13:25:46.003 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main 2026-03-10T13:25:46.003 INFO:journalctl@ceph.mon.a.vm00.stdout: return _run_code(code, main_globals, None, 2026-03-10T13:25:46.003 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code 2026-03-10T13:25:46.003 INFO:journalctl@ceph.mon.a.vm00.stdout: exec(code, run_globals) 2026-03-10T13:25:46.003 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in 2026-03-10T13:25:46.003 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main 2026-03-10T13:25:46.003 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3062, in command_deploy_from 2026-03-10T13:25:46.003 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3097, in _common_deploy 2026-03-10T13:25:46.003 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3117, in _deploy_daemon_container 2026-03-10T13:25:46.003 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 1126, in deploy_daemon 2026-03-10T13:25:46.003 INFO:journalctl@ceph.mon.a.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 307, in call_throws 2026-03-10T13:25:46.003 INFO:journalctl@ceph.mon.a.vm00.stdout: RuntimeError: Failed command: systemctl restart ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo: Job for ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service failed because the control process exited with error code. 2026-03-10T13:25:46.003 INFO:journalctl@ceph.mon.a.vm00.stdout: See "systemctl status ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service" and "journalctl -xeu ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service" for details. 2026-03-10T13:25:46.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:45 vm00 ceph-mon[47364]: Reconfiguring prometheus.a (dependencies changed)... 
2026-03-10T13:25:46.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:45 vm00 ceph-mon[47364]: pgmap v5: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:25:46.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:45 vm00 ceph-mon[47364]: Reconfiguring daemon prometheus.a on vm08 2026-03-10T13:25:46.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:45 vm00 ceph-mon[47364]: mgrmap e38: y(active, since 5s), standbys: x 2026-03-10T13:25:46.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:45 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-10T13:25:46.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:45 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:46.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:45 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:46.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:45 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-10T13:25:46.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:45 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm08.local:9095"}]: dispatch 2026-03-10T13:25:46.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:45 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:46.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:45 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:25:46.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:45 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-10T13:25:46.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:45 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-10T13:25:46.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:45 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:25:46.004 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:45 vm00 ceph-mon[51670]: cephadm exited with an error code: 1, stderr: Reconfig daemon iscsi.foo.vm00.dezodo ... 2026-03-10T13:25:46.004 INFO:journalctl@ceph.mon.c.vm00.stdout: Creating ceph-iscsi config... 
2026-03-10T13:25:46.004 INFO:journalctl@ceph.mon.c.vm00.stdout: Write file: /var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/iscsi.foo.vm00.dezodo/iscsi-gateway.cfg 2026-03-10T13:25:46.004 INFO:journalctl@ceph.mon.c.vm00.stdout: Write file: /var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/iscsi.foo.vm00.dezodo/tcmu-runner-entrypoint.sh 2026-03-10T13:25:46.004 INFO:journalctl@ceph.mon.c.vm00.stdout: Non-zero exit code 1 from systemctl restart ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo 2026-03-10T13:25:46.004 INFO:journalctl@ceph.mon.c.vm00.stdout: systemctl: stderr Job for ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service failed because the control process exited with error code. 2026-03-10T13:25:46.004 INFO:journalctl@ceph.mon.c.vm00.stdout: systemctl: stderr See "systemctl status ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service" and "journalctl -xeu ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service" for details. 2026-03-10T13:25:46.004 INFO:journalctl@ceph.mon.c.vm00.stdout: Traceback (most recent call last): 2026-03-10T13:25:46.004 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main 2026-03-10T13:25:46.004 INFO:journalctl@ceph.mon.c.vm00.stdout: return _run_code(code, main_globals, None, 2026-03-10T13:25:46.004 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code 2026-03-10T13:25:46.004 INFO:journalctl@ceph.mon.c.vm00.stdout: exec(code, run_globals) 2026-03-10T13:25:46.004 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in 2026-03-10T13:25:46.004 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main 2026-03-10T13:25:46.004 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3062, in command_deploy_from 2026-03-10T13:25:46.004 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3097, in _common_deploy 2026-03-10T13:25:46.004 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3117, in _deploy_daemon_container 2026-03-10T13:25:46.004 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 1126, in deploy_daemon 2026-03-10T13:25:46.004 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 307, in call_throws 2026-03-10T13:25:46.004 INFO:journalctl@ceph.mon.c.vm00.stdout: RuntimeError: Failed command: systemctl restart ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo: Job for ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service failed because the control process exited with error code. 2026-03-10T13:25:46.004 INFO:journalctl@ceph.mon.c.vm00.stdout: See "systemctl status ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service" and "journalctl -xeu ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service" for details. 
2026-03-10T13:25:46.004 INFO:journalctl@ceph.mon.c.vm00.stdout: Traceback (most recent call last): 2026-03-10T13:25:46.004 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1145, in _check_daemons 2026-03-10T13:25:46.004 INFO:journalctl@ceph.mon.c.vm00.stdout: self.mgr._daemon_action(daemon_spec, action=action) 2026-03-10T13:25:46.004 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/usr/share/ceph/mgr/cephadm/module.py", line 2545, in _daemon_action 2026-03-10T13:25:46.004 INFO:journalctl@ceph.mon.c.vm00.stdout: return self.wait_async( 2026-03-10T13:25:46.004 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/usr/share/ceph/mgr/cephadm/module.py", line 815, in wait_async 2026-03-10T13:25:46.004 INFO:journalctl@ceph.mon.c.vm00.stdout: return self.event_loop.get_result(coro, timeout) 2026-03-10T13:25:46.004 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/usr/share/ceph/mgr/cephadm/ssh.py", line 136, in get_result 2026-03-10T13:25:46.004 INFO:journalctl@ceph.mon.c.vm00.stdout: return future.result(timeout) 2026-03-10T13:25:46.004 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/lib64/python3.9/concurrent/futures/_base.py", line 446, in result 2026-03-10T13:25:46.004 INFO:journalctl@ceph.mon.c.vm00.stdout: return self.__get_result() 2026-03-10T13:25:46.004 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/lib64/python3.9/concurrent/futures/_base.py", line 391, in __get_result 2026-03-10T13:25:46.004 INFO:journalctl@ceph.mon.c.vm00.stdout: raise self._exception 2026-03-10T13:25:46.004 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1381, in _create_daemon 2026-03-10T13:25:46.004 INFO:journalctl@ceph.mon.c.vm00.stdout: out, err, code = await self._run_cephadm( 2026-03-10T13:25:46.004 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1724, in _run_cephadm 2026-03-10T13:25:46.004 INFO:journalctl@ceph.mon.c.vm00.stdout: raise OrchestratorError( 2026-03-10T13:25:46.004 INFO:journalctl@ceph.mon.c.vm00.stdout: orchestrator._interface.OrchestratorError: cephadm exited with an error code: 1, stderr: Reconfig daemon iscsi.foo.vm00.dezodo ... 2026-03-10T13:25:46.004 INFO:journalctl@ceph.mon.c.vm00.stdout: Creating ceph-iscsi config... 2026-03-10T13:25:46.004 INFO:journalctl@ceph.mon.c.vm00.stdout: Write file: /var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/iscsi.foo.vm00.dezodo/iscsi-gateway.cfg 2026-03-10T13:25:46.004 INFO:journalctl@ceph.mon.c.vm00.stdout: Write file: /var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/iscsi.foo.vm00.dezodo/tcmu-runner-entrypoint.sh 2026-03-10T13:25:46.004 INFO:journalctl@ceph.mon.c.vm00.stdout: Non-zero exit code 1 from systemctl restart ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo 2026-03-10T13:25:46.004 INFO:journalctl@ceph.mon.c.vm00.stdout: systemctl: stderr Job for ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service failed because the control process exited with error code. 2026-03-10T13:25:46.004 INFO:journalctl@ceph.mon.c.vm00.stdout: systemctl: stderr See "systemctl status ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service" and "journalctl -xeu ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service" for details. 
2026-03-10T13:25:46.004 INFO:journalctl@ceph.mon.c.vm00.stdout: Traceback (most recent call last): 2026-03-10T13:25:46.004 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main 2026-03-10T13:25:46.004 INFO:journalctl@ceph.mon.c.vm00.stdout: return _run_code(code, main_globals, None, 2026-03-10T13:25:46.004 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code 2026-03-10T13:25:46.005 INFO:journalctl@ceph.mon.c.vm00.stdout: exec(code, run_globals) 2026-03-10T13:25:46.005 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in 2026-03-10T13:25:46.005 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main 2026-03-10T13:25:46.005 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3062, in command_deploy_from 2026-03-10T13:25:46.005 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3097, in _common_deploy 2026-03-10T13:25:46.005 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3117, in _deploy_daemon_container 2026-03-10T13:25:46.005 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 1126, in deploy_daemon 2026-03-10T13:25:46.005 INFO:journalctl@ceph.mon.c.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 307, in call_throws 2026-03-10T13:25:46.005 INFO:journalctl@ceph.mon.c.vm00.stdout: RuntimeError: Failed command: systemctl restart ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo: Job for ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service failed because the control process exited with error code. 2026-03-10T13:25:46.005 INFO:journalctl@ceph.mon.c.vm00.stdout: See "systemctl status ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service" and "journalctl -xeu ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service" for details. 2026-03-10T13:25:46.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:45 vm00 ceph-mon[51670]: Reconfiguring prometheus.a (dependencies changed)... 
2026-03-10T13:25:46.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:45 vm00 ceph-mon[51670]: pgmap v5: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:25:46.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:45 vm00 ceph-mon[51670]: Reconfiguring daemon prometheus.a on vm08 2026-03-10T13:25:46.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:45 vm00 ceph-mon[51670]: mgrmap e38: y(active, since 5s), standbys: x 2026-03-10T13:25:46.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:45 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-10T13:25:46.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:45 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:46.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:45 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:46.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:45 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-10T13:25:46.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:45 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm08.local:9095"}]: dispatch 2026-03-10T13:25:46.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:45 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:46.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:45 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:25:46.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:45 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-10T13:25:46.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:45 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-10T13:25:46.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:45 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:25:46.183 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:45 vm08 podman[79581]: 2026-03-10 13:25:45.920029332 +0000 UTC m=+0.055541483 container died 62b908c184a8253b0ba3d99c421442a9afa370f6aa242dd221137d96d770e057 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x, org.label-schema.schema-version=1.0, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.build-date=20260223, io.buildah.version=1.41.3, 
org.opencontainers.image.documentation=https://docs.ceph.com/) 2026-03-10T13:25:46.183 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:45 vm08 podman[79581]: 2026-03-10 13:25:45.945258359 +0000 UTC m=+0.080770499 container remove 62b908c184a8253b0ba3d99c421442a9afa370f6aa242dd221137d96d770e057 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, ceph=True, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, io.buildah.version=1.41.3, FROM_IMAGE=quay.io/centos/centos:stream9) 2026-03-10T13:25:46.183 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:45 vm08 bash[79581]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x 2026-03-10T13:25:46.183 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:45 vm08 systemd[1]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@mgr.x.service: Main process exited, code=exited, status=143/n/a 2026-03-10T13:25:46.183 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:46 vm08 systemd[1]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@mgr.x.service: Failed with result 'exit-code'. 2026-03-10T13:25:46.184 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:46 vm08 systemd[1]: Stopped Ceph mgr.x for 98a3dada-1c81-11f1-89c9-d57c120f78d5. 2026-03-10T13:25:46.184 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:46 vm08 systemd[1]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@mgr.x.service: Consumed 20.230s CPU time. 2026-03-10T13:25:46.489 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:46 vm08 systemd[1]: Starting Ceph mgr.x for 98a3dada-1c81-11f1-89c9-d57c120f78d5... 
2026-03-10T13:25:46.490 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:46 vm08 podman[79695]: 2026-03-10 13:25:46.314650843 +0000 UTC m=+0.029197477 container create 31b91eebc8566a685ff6ef1ced5d07555b7ef2994a2998dc1182247f362c5f17 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , ceph=True, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-10T13:25:46.490 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:46 vm08 podman[79695]: 2026-03-10 13:25:46.359750567 +0000 UTC m=+0.074297191 container init 31b91eebc8566a685ff6ef1ced5d07555b7ef2994a2998dc1182247f362c5f17 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, io.buildah.version=1.41.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.vendor=CentOS, CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df) 2026-03-10T13:25:46.490 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:46 vm08 podman[79695]: 2026-03-10 13:25:46.362851953 +0000 UTC m=+0.077398587 container start 31b91eebc8566a685ff6ef1ced5d07555b7ef2994a2998dc1182247f362c5f17 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid, ceph=True) 2026-03-10T13:25:46.490 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:46 vm08 bash[79695]: 31b91eebc8566a685ff6ef1ced5d07555b7ef2994a2998dc1182247f362c5f17 2026-03-10T13:25:46.490 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:46 vm08 podman[79695]: 2026-03-10 13:25:46.306641739 +0000 UTC m=+0.021188374 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:25:46.490 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:46 vm08 systemd[1]: Started 
Ceph mgr.x for 98a3dada-1c81-11f1-89c9-d57c120f78d5. 2026-03-10T13:25:46.773 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:46 vm08 ceph-mon[49535]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-10T13:25:46.773 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:46 vm08 ceph-mon[49535]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm08.local:9095"}]: dispatch 2026-03-10T13:25:46.773 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:46 vm08 ceph-mon[49535]: Upgrade: Updating mgr.x 2026-03-10T13:25:46.773 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:46 vm08 ceph-mon[49535]: Deploying daemon mgr.x on vm08 2026-03-10T13:25:46.773 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:46 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:46.773 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:46 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:46.773 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:46 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[79709]: 2026-03-10T13:25:46.488+0000 7f7f93d36140 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member 2026-03-10T13:25:46.773 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:46 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[79709]: 2026-03-10T13:25:46.528+0000 7f7f93d36140 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member 2026-03-10T13:25:46.982 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:46 vm00 ceph-mon[47364]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-10T13:25:46.982 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:46 vm00 ceph-mon[47364]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm08.local:9095"}]: dispatch 2026-03-10T13:25:46.982 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:46 vm00 ceph-mon[47364]: Upgrade: Updating mgr.x 2026-03-10T13:25:46.983 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:46 vm00 ceph-mon[47364]: Deploying daemon mgr.x on vm08 2026-03-10T13:25:46.983 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:46 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:46.983 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:46 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:46.983 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:46 vm00 ceph-mon[51670]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-10T13:25:46.983 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:46 vm00 ceph-mon[51670]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm08.local:9095"}]: dispatch 2026-03-10T13:25:46.983 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:46 vm00 ceph-mon[51670]: Upgrade: Updating mgr.x 2026-03-10T13:25:46.983 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:46 vm00 ceph-mon[51670]: Deploying daemon mgr.x on vm08 2026-03-10T13:25:46.983 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:46 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:46.983 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:46 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:47.252 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:25:46 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:25:46.981Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:25:47.253 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:25:46 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:25:46.983Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:25:47.263 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:47 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[79709]: 2026-03-10T13:25:47.072+0000 7f7f93d36140 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-10T13:25:47.520 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:47 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[79709]: 2026-03-10T13:25:47.379+0000 7f7f93d36140 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member 2026-03-10T13:25:47.520 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:47 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[79709]: /lib64/python3.9/site-packages/scipy/__init__.py:73: UserWarning: NumPy was imported from a Python sub-interpreter but NumPy does not properly support sub-interpreters. This will likely work for most users but might cause hard to track down issues or subtle bugs. A common user of the rare sub-interpreter feature is wsgi which also allows single-interpreter mode. 2026-03-10T13:25:47.520 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:47 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[79709]: Improvements in the case of bugs are welcome, but is not on the NumPy roadmap, and full support may require significant effort to achieve. 
2026-03-10T13:25:47.520 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:47 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[79709]: from numpy import show_config as show_numpy_config 2026-03-10T13:25:47.520 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:47 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[79709]: 2026-03-10T13:25:47.461+0000 7f7f93d36140 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member 2026-03-10T13:25:47.520 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:47 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[79709]: 2026-03-10T13:25:47.497+0000 7f7f93d36140 -1 mgr[py] Module influx has missing NOTIFY_TYPES member 2026-03-10T13:25:48.001 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:47 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[79709]: 2026-03-10T13:25:47.565+0000 7f7f93d36140 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member 2026-03-10T13:25:48.001 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:47 vm08 ceph-mon[49535]: pgmap v6: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:25:48.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:47 vm00 ceph-mon[47364]: pgmap v6: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:25:48.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:47 vm00 ceph-mon[51670]: pgmap v6: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:25:48.270 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:48 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[79709]: 2026-03-10T13:25:48.080+0000 7f7f93d36140 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-10T13:25:48.270 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:48 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[79709]: 2026-03-10T13:25:48.195+0000 7f7f93d36140 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-10T13:25:48.270 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:48 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[79709]: 2026-03-10T13:25:48.233+0000 7f7f93d36140 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 2026-03-10T13:25:48.538 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:48 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[79709]: 2026-03-10T13:25:48.277+0000 7f7f93d36140 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-10T13:25:48.539 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:48 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[79709]: 2026-03-10T13:25:48.317+0000 7f7f93d36140 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 2026-03-10T13:25:48.539 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:48 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[79709]: 2026-03-10T13:25:48.356+0000 7f7f93d36140 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-10T13:25:48.539 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:48 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[79709]: 2026-03-10T13:25:48.536+0000 7f7f93d36140 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member 2026-03-10T13:25:48.823 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:48 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[79709]: 2026-03-10T13:25:48.590+0000 7f7f93d36140 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-10T13:25:48.823 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:48 vm08 
ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[79709]: 2026-03-10T13:25:48.817+0000 7f7f93d36140 -1 mgr[py] Module rgw has missing NOTIFY_TYPES member 2026-03-10T13:25:49.401 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:49 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[79709]: 2026-03-10T13:25:49.117+0000 7f7f93d36140 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-10T13:25:49.401 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:49 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[79709]: 2026-03-10T13:25:49.161+0000 7f7f93d36140 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member 2026-03-10T13:25:49.401 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:49 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[79709]: 2026-03-10T13:25:49.211+0000 7f7f93d36140 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-10T13:25:49.401 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:49 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[79709]: 2026-03-10T13:25:49.290+0000 7f7f93d36140 -1 mgr[py] Module status has missing NOTIFY_TYPES member 2026-03-10T13:25:49.401 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:49 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[79709]: 2026-03-10T13:25:49.324+0000 7f7f93d36140 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 2026-03-10T13:25:49.401 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:49 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[79709]: 2026-03-10T13:25:49.399+0000 7f7f93d36140 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member 2026-03-10T13:25:49.681 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:49 vm08 ceph-mon[49535]: pgmap v7: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 26 KiB/s rd, 0 B/s wr, 11 op/s 2026-03-10T13:25:49.681 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:49 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:49.681 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:49 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:49.681 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:49 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:49.681 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:49 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:49.681 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:49 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[79709]: 2026-03-10T13:25:49.515+0000 7f7f93d36140 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member 2026-03-10T13:25:49.681 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:49 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[79709]: 2026-03-10T13:25:49.644+0000 7f7f93d36140 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-10T13:25:49.681 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:49 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[79709]: 2026-03-10T13:25:49.679+0000 7f7f93d36140 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-10T13:25:49.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:49 vm00 ceph-mon[47364]: pgmap v7: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 26 KiB/s rd, 0 B/s wr, 11 op/s 2026-03-10T13:25:49.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:49 vm00 ceph-mon[47364]: from='mgr.24955 
192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:49.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:49 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:49.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:49 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:49.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:49 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:49.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:49 vm00 ceph-mon[51670]: pgmap v7: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 26 KiB/s rd, 0 B/s wr, 11 op/s 2026-03-10T13:25:49.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:49 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:49.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:49 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:49.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:49 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:49.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:49 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:49.943 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:49 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[79709]: [10/Mar/2026:13:25:49] ENGINE Bus STARTING 2026-03-10T13:25:49.943 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:49 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[79709]: CherryPy Checker: 2026-03-10T13:25:49.943 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:49 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[79709]: The Application mounted at '' has an empty config. 2026-03-10T13:25:49.943 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:49 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[79709]: 2026-03-10T13:25:49.943 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:49 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[79709]: [10/Mar/2026:13:25:49] ENGINE Serving on http://:::9283 2026-03-10T13:25:49.943 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:25:49 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[79709]: [10/Mar/2026:13:25:49] ENGINE Bus STARTED 2026-03-10T13:25:50.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:50 vm00 ceph-mon[47364]: Standby manager daemon x restarted 2026-03-10T13:25:50.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:50 vm00 ceph-mon[47364]: from='mgr.? 192.168.123.108:0/285065039' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-10T13:25:50.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:50 vm00 ceph-mon[47364]: Standby manager daemon x started 2026-03-10T13:25:50.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:50 vm00 ceph-mon[47364]: from='mgr.? 192.168.123.108:0/285065039' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-10T13:25:50.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:50 vm00 ceph-mon[47364]: from='mgr.? 192.168.123.108:0/285065039' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-10T13:25:50.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:50 vm00 ceph-mon[47364]: from='mgr.? 
192.168.123.108:0/285065039' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-10T13:25:50.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:50 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:50.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:50 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:50.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:50 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:25:50.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:50 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:25:50.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:50 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:50.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:50 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dezodo", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-10T13:25:50.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:50 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:25:50.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:50 vm00 ceph-mon[51670]: Standby manager daemon x restarted 2026-03-10T13:25:50.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:50 vm00 ceph-mon[51670]: from='mgr.? 192.168.123.108:0/285065039' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-10T13:25:50.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:50 vm00 ceph-mon[51670]: Standby manager daemon x started 2026-03-10T13:25:50.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:50 vm00 ceph-mon[51670]: from='mgr.? 192.168.123.108:0/285065039' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-10T13:25:50.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:50 vm00 ceph-mon[51670]: from='mgr.? 192.168.123.108:0/285065039' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-10T13:25:50.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:50 vm00 ceph-mon[51670]: from='mgr.? 
192.168.123.108:0/285065039' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-10T13:25:50.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:50 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:50.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:50 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:50.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:50 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:25:50.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:50 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:25:50.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:50 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:50.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:50 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dezodo", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-10T13:25:50.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:50 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:25:50.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:50 vm08 ceph-mon[49535]: Standby manager daemon x restarted 2026-03-10T13:25:50.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:50 vm08 ceph-mon[49535]: from='mgr.? 192.168.123.108:0/285065039' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-10T13:25:50.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:50 vm08 ceph-mon[49535]: Standby manager daemon x started 2026-03-10T13:25:50.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:50 vm08 ceph-mon[49535]: from='mgr.? 192.168.123.108:0/285065039' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-10T13:25:50.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:50 vm08 ceph-mon[49535]: from='mgr.? 192.168.123.108:0/285065039' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-10T13:25:50.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:50 vm08 ceph-mon[49535]: from='mgr.? 
192.168.123.108:0/285065039' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-10T13:25:50.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:50 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:50.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:50 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:50.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:50 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:25:50.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:50 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:25:50.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:50 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:50.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:50 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dezodo", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-10T13:25:50.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:50 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:25:51.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[47364]: Reconfiguring iscsi.foo.vm00.dezodo (dependencies changed)... 2026-03-10T13:25:51.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[47364]: Reconfiguring daemon iscsi.foo.vm00.dezodo on vm00 2026-03-10T13:25:51.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[47364]: pgmap v8: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 20 KiB/s rd, 0 B/s wr, 8 op/s 2026-03-10T13:25:51.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[47364]: mgrmap e39: y(active, since 11s), standbys: x 2026-03-10T13:25:51.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:51.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:51.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-10T13:25:51.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[47364]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-10T13:25:51.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-10T13:25:51.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[47364]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-10T13:25:51.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:25:51.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:25:51.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[47364]: Upgrade: Setting container_image for all mgr 2026-03-10T13:25:51.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:51.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr.x"}]: dispatch 2026-03-10T13:25:51.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr.x"}]': finished 2026-03-10T13:25:51.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr.y"}]: dispatch 2026-03-10T13:25:51.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr.y"}]': finished 2026-03-10T13:25:51.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:25:51.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[47364]: Upgrade: Setting container_image for all crash 2026-03-10T13:25:51.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:51.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:25:51.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[47364]: Upgrade: Setting container_image for all mds 2026-03-10T13:25:51.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:51.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:25:51.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[47364]: Upgrade: Setting container_image for all rbd-mirror 2026-03-10T13:25:51.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:51.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:25:51.753 
INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[47364]: Upgrade: Setting container_image for all cephfs-mirror 2026-03-10T13:25:51.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:51.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:25:51.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[47364]: Upgrade: Setting container_image for all ceph-exporter 2026-03-10T13:25:51.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:51.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:25:51.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[47364]: Upgrade: Setting container_image for all nfs 2026-03-10T13:25:51.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:51.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:25:51.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[47364]: Upgrade: Setting container_image for all nvmeof 2026-03-10T13:25:51.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:51.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:25:51.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[47364]: Upgrade: Setting container_image for all node-exporter 2026-03-10T13:25:51.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:25:51.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[47364]: Upgrade: Setting container_image for all prometheus 2026-03-10T13:25:51.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:25:51.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[47364]: Upgrade: Setting container_image for all alertmanager 2026-03-10T13:25:51.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/2803908099' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-10T13:25:51.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[51670]: Reconfiguring iscsi.foo.vm00.dezodo (dependencies changed)... 
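From 13:25:51 onward every cluster-log message is recorded three times, once per mon (mon.a and mon.c on vm00, mon.b on vm08), which makes the "Upgrade: Setting container_image for all <type>" sequence hard to follow. A sketch, under the assumption that deduplicating on the text after the "ceph-mon[<pid>]:" marker is acceptable, that collapses the copies and prints each upgrade step once; the log path is hypothetical:

#!/usr/bin/env python3
"""Collapse triplicate mon cluster-log entries and list 'Upgrade:' steps.

Sketch only: mon.a, mon.b and mon.c each record the same cluster-log line,
so deduplicating on the message text after 'ceph-mon[<pid>]: ' gives one
copy per step. Note this also collapses a step that legitimately repeats
with identical wording; good enough for a quick timeline.
"""
import re

MON_MSG = re.compile(
    r"ceph-mon\[\d+\]: (Upgrade: .*?)(?= \d{4}-\d{2}-\d{2}T|\s*$)"
)

def upgrade_steps(path: str) -> list[str]:
    seen: set[str] = set()
    steps: list[str] = []
    with open(path, errors="replace") as fh:
        for line in fh:
            for msg in MON_MSG.findall(line):
                if msg not in seen:   # drop the copies from the other two mons
                    seen.add(msg)
                    steps.append(msg)
    return steps

if __name__ == "__main__":
    for step in upgrade_steps("teuthology.log"):  # hypothetical local copy
        print(step)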
2026-03-10T13:25:51.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[51670]: Reconfiguring daemon iscsi.foo.vm00.dezodo on vm00 2026-03-10T13:25:51.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[51670]: pgmap v8: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 20 KiB/s rd, 0 B/s wr, 8 op/s 2026-03-10T13:25:51.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[51670]: mgrmap e39: y(active, since 11s), standbys: x 2026-03-10T13:25:51.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:51.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:51.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-10T13:25:51.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[51670]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-10T13:25:51.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-10T13:25:51.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[51670]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-10T13:25:51.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:25:51.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:25:51.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[51670]: Upgrade: Setting container_image for all mgr 2026-03-10T13:25:51.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:51.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr.x"}]: dispatch 2026-03-10T13:25:51.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr.x"}]': finished 2026-03-10T13:25:51.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr.y"}]: dispatch 2026-03-10T13:25:51.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr.y"}]': finished 2026-03-10T13:25:51.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[51670]: from='mgr.24955 
192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:25:51.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[51670]: Upgrade: Setting container_image for all crash 2026-03-10T13:25:51.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:51.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:25:51.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[51670]: Upgrade: Setting container_image for all mds 2026-03-10T13:25:51.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:51.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:25:51.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[51670]: Upgrade: Setting container_image for all rbd-mirror 2026-03-10T13:25:51.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:51.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:25:51.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[51670]: Upgrade: Setting container_image for all cephfs-mirror 2026-03-10T13:25:51.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:51.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:25:51.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[51670]: Upgrade: Setting container_image for all ceph-exporter 2026-03-10T13:25:51.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:51.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:25:51.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[51670]: Upgrade: Setting container_image for all nfs 2026-03-10T13:25:51.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:51.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:25:51.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[51670]: Upgrade: Setting container_image for all nvmeof 2026-03-10T13:25:51.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:51.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:51 vm00 
ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:25:51.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[51670]: Upgrade: Setting container_image for all node-exporter 2026-03-10T13:25:51.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:25:51.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[51670]: Upgrade: Setting container_image for all prometheus 2026-03-10T13:25:51.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:25:51.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[51670]: Upgrade: Setting container_image for all alertmanager 2026-03-10T13:25:51.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:51 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/2803908099' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-10T13:25:51.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:51 vm08 ceph-mon[49535]: Reconfiguring iscsi.foo.vm00.dezodo (dependencies changed)... 2026-03-10T13:25:51.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:51 vm08 ceph-mon[49535]: Reconfiguring daemon iscsi.foo.vm00.dezodo on vm00 2026-03-10T13:25:51.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:51 vm08 ceph-mon[49535]: pgmap v8: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 20 KiB/s rd, 0 B/s wr, 8 op/s 2026-03-10T13:25:51.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:51 vm08 ceph-mon[49535]: mgrmap e39: y(active, since 11s), standbys: x 2026-03-10T13:25:51.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:51 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:51.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:51 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:51.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:51 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-10T13:25:51.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:51 vm08 ceph-mon[49535]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-10T13:25:51.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:51 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-10T13:25:51.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:51 vm08 ceph-mon[49535]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-10T13:25:51.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:51 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:25:51.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:51 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:25:51.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:51 vm08 ceph-mon[49535]: Upgrade: Setting container_image for all mgr 2026-03-10T13:25:51.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:51 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:51.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:51 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr.x"}]: dispatch 2026-03-10T13:25:51.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:51 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr.x"}]': finished 2026-03-10T13:25:51.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:51 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr.y"}]: dispatch 2026-03-10T13:25:51.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:51 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr.y"}]': finished 2026-03-10T13:25:51.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:51 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:25:51.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:51 vm08 ceph-mon[49535]: Upgrade: Setting container_image for all crash 2026-03-10T13:25:51.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:51 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:51.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:51 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:25:51.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:51 vm08 ceph-mon[49535]: Upgrade: Setting container_image for all mds 2026-03-10T13:25:51.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:51 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:51.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:51 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:25:51.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:51 vm08 ceph-mon[49535]: Upgrade: Setting container_image for all rbd-mirror 2026-03-10T13:25:51.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:51 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:51.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:51 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:25:51.771 
INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:51 vm08 ceph-mon[49535]: Upgrade: Setting container_image for all cephfs-mirror 2026-03-10T13:25:51.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:51 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:51.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:51 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:25:51.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:51 vm08 ceph-mon[49535]: Upgrade: Setting container_image for all ceph-exporter 2026-03-10T13:25:51.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:51 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:51.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:51 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:25:51.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:51 vm08 ceph-mon[49535]: Upgrade: Setting container_image for all nfs 2026-03-10T13:25:51.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:51 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:51.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:51 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:25:51.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:51 vm08 ceph-mon[49535]: Upgrade: Setting container_image for all nvmeof 2026-03-10T13:25:51.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:51 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:51.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:51 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:25:51.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:51 vm08 ceph-mon[49535]: Upgrade: Setting container_image for all node-exporter 2026-03-10T13:25:51.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:51 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:25:51.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:51 vm08 ceph-mon[49535]: Upgrade: Setting container_image for all prometheus 2026-03-10T13:25:51.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:51 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:25:51.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:51 vm08 ceph-mon[49535]: Upgrade: Setting container_image for all alertmanager 2026-03-10T13:25:51.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:51 vm08 ceph-mon[49535]: from='client.? 
192.168.123.100:0/2803908099' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-10T13:25:52.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:52 vm00 ceph-mon[47364]: Upgrade: Updating grafana.a 2026-03-10T13:25:52.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:52 vm00 ceph-mon[47364]: Deploying daemon grafana.a on vm08 2026-03-10T13:25:52.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:52 vm00 ceph-mon[51670]: Upgrade: Updating grafana.a 2026-03-10T13:25:52.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:52 vm00 ceph-mon[51670]: Deploying daemon grafana.a on vm08 2026-03-10T13:25:52.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:52 vm08 ceph-mon[49535]: Upgrade: Updating grafana.a 2026-03-10T13:25:52.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:52 vm08 ceph-mon[49535]: Deploying daemon grafana.a on vm08 2026-03-10T13:25:53.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:53 vm00 ceph-mon[47364]: pgmap v9: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 16 KiB/s rd, 0 B/s wr, 7 op/s 2026-03-10T13:25:53.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:53 vm00 ceph-mon[51670]: pgmap v9: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 16 KiB/s rd, 0 B/s wr, 7 op/s 2026-03-10T13:25:53.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:53 vm08 ceph-mon[49535]: pgmap v9: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 16 KiB/s rd, 0 B/s wr, 7 op/s 2026-03-10T13:25:56.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:55 vm00 ceph-mon[47364]: pgmap v10: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 0 B/s wr, 6 op/s 2026-03-10T13:25:56.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:55 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:25:56.002 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:25:55 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:25:55] "GET /metrics HTTP/1.1" 200 37550 "" "Prometheus/2.51.0" 2026-03-10T13:25:56.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:55 vm00 ceph-mon[51670]: pgmap v10: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 0 B/s wr, 6 op/s 2026-03-10T13:25:56.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:55 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:25:56.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:55 vm08 ceph-mon[49535]: pgmap v10: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 0 B/s wr, 6 op/s 2026-03-10T13:25:56.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:55 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:25:57.253 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:25:56 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:25:56.982Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post 
\"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:25:57.253 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:25:56 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:25:56.983Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:25:57.518 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 systemd[1]: Stopping Ceph grafana.a for 98a3dada-1c81-11f1-89c9-d57c120f78d5... 2026-03-10T13:25:57.518 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[71583]: t=2026-03-10T13:25:57+0000 lvl=info msg="Shutdown started" logger=server reason="System signal: terminated" 2026-03-10T13:25:57.518 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 podman[80902]: 2026-03-10 13:25:57.342100338 +0000 UTC m=+0.028159084 container died 263cac442a993903ce76fa4340f496a0ef92b93fbe9ee409eb8e6ba0ca84905c (image=quay.io/ceph/ceph-grafana:8.3.5, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a, version=8.5, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, io.buildah.version=1.24.2, architecture=x86_64, io.openshift.tags=base rhel8, vendor=Red Hat, Inc., vcs-type=git, description=Ceph Grafana Container, name=ubi8, vcs-ref=3aadd00326f3dd6cfe65ee31017ab98915fddb56, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.5-236.1648460182, com.redhat.component=ubi8-container, io.openshift.expose-services=, distribution-scope=public, release=236.1648460182, summary=Grafana Container configured for Ceph mgr/dashboard integration, build-date=2022-03-28T10:36:18.413762, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, maintainer=Paul Cuzner , io.k8s.display-name=Red Hat Universal Base Image 8) 2026-03-10T13:25:57.518 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 podman[80902]: 2026-03-10 13:25:57.364134425 +0000 UTC m=+0.050193160 container remove 263cac442a993903ce76fa4340f496a0ef92b93fbe9ee409eb8e6ba0ca84905c (image=quay.io/ceph/ceph-grafana:8.3.5, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a, architecture=x86_64, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, version=8.5, io.openshift.expose-services=, release=236.1648460182, summary=Grafana Container configured for Ceph mgr/dashboard integration, maintainer=Paul Cuzner , io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., com.redhat.component=ubi8-container, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.5-236.1648460182, io.openshift.tags=base rhel8, build-date=2022-03-28T10:36:18.413762, description=Ceph Grafana Container, io.buildah.version=1.24.2, vcs-ref=3aadd00326f3dd6cfe65ee31017ab98915fddb56, name=ubi8, vendor=Red Hat, Inc., vcs-type=git, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, distribution-scope=public, io.k8s.display-name=Red Hat Universal Base Image 8) 2026-03-10T13:25:57.518 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 bash[80902]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a 2026-03-10T13:25:57.518 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 systemd[1]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@grafana.a.service: Deactivated successfully. 2026-03-10T13:25:57.518 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 systemd[1]: Stopped Ceph grafana.a for 98a3dada-1c81-11f1-89c9-d57c120f78d5. 2026-03-10T13:25:57.518 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 systemd[1]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@grafana.a.service: Consumed 2.109s CPU time. 2026-03-10T13:25:57.870 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 systemd[1]: Starting Ceph grafana.a for 98a3dada-1c81-11f1-89c9-d57c120f78d5... 2026-03-10T13:25:57.870 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 podman[81011]: 2026-03-10 13:25:57.715339824 +0000 UTC m=+0.019703244 container create 960e32589e98212c79bf72d38fe81dc1625621590d58979c78e62827d5495306 (image=quay.io/ceph/grafana:10.4.0, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a, maintainer=Grafana Labs ) 2026-03-10T13:25:57.870 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 podman[81011]: 2026-03-10 13:25:57.751034041 +0000 UTC m=+0.055397451 container init 960e32589e98212c79bf72d38fe81dc1625621590d58979c78e62827d5495306 (image=quay.io/ceph/grafana:10.4.0, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a, maintainer=Grafana Labs ) 2026-03-10T13:25:57.870 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 podman[81011]: 2026-03-10 13:25:57.753836758 +0000 UTC m=+0.058200178 container start 960e32589e98212c79bf72d38fe81dc1625621590d58979c78e62827d5495306 (image=quay.io/ceph/grafana:10.4.0, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a, maintainer=Grafana Labs ) 2026-03-10T13:25:57.870 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 bash[81011]: 960e32589e98212c79bf72d38fe81dc1625621590d58979c78e62827d5495306 2026-03-10T13:25:57.870 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 podman[81011]: 2026-03-10 13:25:57.707049245 +0000 UTC m=+0.011412665 image pull c8b91775d855b99270fc5d22f3c6737e8cca01ef4c25c8b0362295e0746fa39b quay.io/ceph/grafana:10.4.0 2026-03-10T13:25:57.870 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 systemd[1]: Started Ceph grafana.a for 98a3dada-1c81-11f1-89c9-d57c120f78d5. 
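The grafana.a redeploy shows up as paired podman lifecycle events: the old container built from quay.io/ceph/ceph-grafana:8.3.5 dies and is removed, then a new one is created and started from quay.io/ceph/grafana:10.4.0. A small sketch, with the log path assumed and the event set limited to what appears above, that lists those events with their images so the before/after change is easy to spot:

#!/usr/bin/env python3
"""List podman container lifecycle events and the image behind each one.

Sketch only: pulls 'container create/init/start/died/remove' events out of
the journalctl capture so an image change during a cephadm redeploy (here
grafana 8.3.5 -> 10.4.0) is visible at a glance. Log path is hypothetical.
"""
import re

EVENT = re.compile(
    r"container (create|init|start|died|remove) ([0-9a-f]{12,}) \(image=([^,)]+)"
)

def podman_events(path: str):
    with open(path, errors="replace") as fh:
        for line in fh:
            for action, cid, image in EVENT.findall(line):
                yield action, cid[:12], image

if __name__ == "__main__":
    for action, cid, image in podman_events("teuthology.log"):  # hypothetical copy
        print(f"{action:7} {cid} {image}")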
2026-03-10T13:25:58.122 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=settings t=2026-03-10T13:25:57.868283346Z level=info msg="Starting Grafana" version=10.4.0 commit=03f502a94d17f7dc4e6c34acdf8428aedd986e4c branch=HEAD compiled=2026-03-10T13:25:57Z 2026-03-10T13:25:58.122 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=settings t=2026-03-10T13:25:57.868425692Z level=info msg="Config loaded from" file=/usr/share/grafana/conf/defaults.ini 2026-03-10T13:25:58.122 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=settings t=2026-03-10T13:25:57.86843001Z level=info msg="Config loaded from" file=/etc/grafana/grafana.ini 2026-03-10T13:25:58.122 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=settings t=2026-03-10T13:25:57.868432555Z level=info msg="Config overridden from command line" arg="default.paths.data=/var/lib/grafana" 2026-03-10T13:25:58.122 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=settings t=2026-03-10T13:25:57.868434538Z level=info msg="Config overridden from command line" arg="default.paths.logs=/var/log/grafana" 2026-03-10T13:25:58.122 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=settings t=2026-03-10T13:25:57.868436482Z level=info msg="Config overridden from command line" arg="default.paths.plugins=/var/lib/grafana/plugins" 2026-03-10T13:25:58.122 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=settings t=2026-03-10T13:25:57.868438867Z level=info msg="Config overridden from command line" arg="default.paths.provisioning=/etc/grafana/provisioning" 2026-03-10T13:25:58.122 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=settings t=2026-03-10T13:25:57.868440741Z level=info msg="Config overridden from command line" arg="default.log.mode=console" 2026-03-10T13:25:58.122 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=settings t=2026-03-10T13:25:57.868442754Z level=info msg="Config overridden from Environment variable" var="GF_PATHS_DATA=/var/lib/grafana" 2026-03-10T13:25:58.122 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=settings t=2026-03-10T13:25:57.868445169Z level=info msg="Config overridden from Environment variable" var="GF_PATHS_LOGS=/var/log/grafana" 2026-03-10T13:25:58.122 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=settings t=2026-03-10T13:25:57.868447102Z level=info msg="Config overridden from Environment variable" var="GF_PATHS_PLUGINS=/var/lib/grafana/plugins" 2026-03-10T13:25:58.122 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=settings t=2026-03-10T13:25:57.868449066Z level=info msg="Config overridden from Environment variable" 
var="GF_PATHS_PROVISIONING=/etc/grafana/provisioning" 2026-03-10T13:25:58.122 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=settings t=2026-03-10T13:25:57.868451109Z level=info msg=Target target=[all] 2026-03-10T13:25:58.122 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=settings t=2026-03-10T13:25:57.868455168Z level=info msg="Path Home" path=/usr/share/grafana 2026-03-10T13:25:58.122 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=settings t=2026-03-10T13:25:57.868457412Z level=info msg="Path Data" path=/var/lib/grafana 2026-03-10T13:25:58.122 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=settings t=2026-03-10T13:25:57.868459516Z level=info msg="Path Logs" path=/var/log/grafana 2026-03-10T13:25:58.122 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=settings t=2026-03-10T13:25:57.868461259Z level=info msg="Path Plugins" path=/var/lib/grafana/plugins 2026-03-10T13:25:58.122 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=settings t=2026-03-10T13:25:57.868463192Z level=info msg="Path Provisioning" path=/etc/grafana/provisioning 2026-03-10T13:25:58.122 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=settings t=2026-03-10T13:25:57.868465156Z level=info msg="App mode production" 2026-03-10T13:25:58.122 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=sqlstore t=2026-03-10T13:25:57.868634794Z level=info msg="Connecting to DB" dbtype=sqlite3 2026-03-10T13:25:58.122 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=sqlstore t=2026-03-10T13:25:57.86864306Z level=warn msg="SQLite database file has broader permissions than it should" path=/var/lib/grafana/grafana.db mode=-rw-r--r-- expected=-rw-r----- 2026-03-10T13:25:58.122 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.868952138Z level=info msg="Starting DB migrations" 2026-03-10T13:25:58.122 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.875418663Z level=info msg="Executing migration" id="Update is_service_account column to nullable" 2026-03-10T13:25:58.122 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.898209155Z level=info msg="Migration successfully executed" id="Update is_service_account column to nullable" duration=22.786525ms 2026-03-10T13:25:58.122 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.899147472Z level=info msg="Executing migration" id="Add uid column to user" 2026-03-10T13:25:58.122 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 
vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.901251991Z level=info msg="Migration successfully executed" id="Add uid column to user" duration=2.10442ms 2026-03-10T13:25:58.122 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.901877052Z level=info msg="Executing migration" id="Update uid column values for users" 2026-03-10T13:25:58.122 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.901970327Z level=info msg="Migration successfully executed" id="Update uid column values for users" duration=93.455µs 2026-03-10T13:25:58.122 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.902528021Z level=info msg="Executing migration" id="Add unique index user_uid" 2026-03-10T13:25:58.122 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.902981199Z level=info msg="Migration successfully executed" id="Add unique index user_uid" duration=452.737µs 2026-03-10T13:25:58.122 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.90384757Z level=info msg="Executing migration" id="Add isPublic for dashboard" 2026-03-10T13:25:58.122 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.905954735Z level=info msg="Migration successfully executed" id="Add isPublic for dashboard" duration=2.106314ms 2026-03-10T13:25:58.122 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.906493484Z level=info msg="Executing migration" id="set service account foreign key to nil if 0" 2026-03-10T13:25:58.123 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.90660383Z level=info msg="Migration successfully executed" id="set service account foreign key to nil if 0" duration=109.816µs 2026-03-10T13:25:58.123 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.907139944Z level=info msg="Executing migration" id="Add last_used_at to api_key table" 2026-03-10T13:25:58.123 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.909248631Z level=info msg="Migration successfully executed" id="Add last_used_at to api_key table" duration=2.108437ms 2026-03-10T13:25:58.123 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.909899059Z level=info msg="Executing migration" id="Add is_revoked column to api_key table" 2026-03-10T13:25:58.123 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator 
t=2026-03-10T13:25:57.911962852Z level=info msg="Migration successfully executed" id="Add is_revoked column to api_key table" duration=2.063883ms 2026-03-10T13:25:58.123 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.912712916Z level=info msg="Executing migration" id="Add playlist column created_at" 2026-03-10T13:25:58.123 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.915021177Z level=info msg="Migration successfully executed" id="Add playlist column created_at" duration=2.305907ms 2026-03-10T13:25:58.123 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.915709776Z level=info msg="Executing migration" id="Add playlist column updated_at" 2026-03-10T13:25:58.123 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.917798056Z level=info msg="Migration successfully executed" id="Add playlist column updated_at" duration=2.088239ms 2026-03-10T13:25:58.123 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.918499578Z level=info msg="Executing migration" id="Add column preferences.json_data" 2026-03-10T13:25:58.123 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.920643422Z level=info msg="Migration successfully executed" id="Add column preferences.json_data" duration=2.143052ms 2026-03-10T13:25:58.123 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.921231492Z level=info msg="Executing migration" id="alter preferences.json_data to mediumtext v1" 2026-03-10T13:25:58.123 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.921289391Z level=info msg="Migration successfully executed" id="alter preferences.json_data to mediumtext v1" duration=58.45µs 2026-03-10T13:25:58.123 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.921977028Z level=info msg="Executing migration" id="Add preferences index org_id" 2026-03-10T13:25:58.123 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.9225074Z level=info msg="Migration successfully executed" id="Add preferences index org_id" duration=529.961µs 2026-03-10T13:25:58.123 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.923219604Z level=info msg="Executing migration" id="Add preferences index user_id" 2026-03-10T13:25:58.123 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.927911688Z level=info msg="Migration successfully executed" id="Add 
preferences index user_id" duration=4.691873ms 2026-03-10T13:25:58.123 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.928776255Z level=info msg="Executing migration" id="Increase tags column to length 4096" 2026-03-10T13:25:58.123 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.928836579Z level=info msg="Migration successfully executed" id="Increase tags column to length 4096" duration=60.382µs 2026-03-10T13:25:58.123 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.929701979Z level=info msg="Executing migration" id="Add column uid in team" 2026-03-10T13:25:58.123 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.932019226Z level=info msg="Migration successfully executed" id="Add column uid in team" duration=2.316256ms 2026-03-10T13:25:58.123 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.934889199Z level=info msg="Executing migration" id="Update uid column values in team" 2026-03-10T13:25:58.123 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.935013672Z level=info msg="Migration successfully executed" id="Update uid column values in team" duration=124.543µs 2026-03-10T13:25:58.123 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.935634505Z level=info msg="Executing migration" id="Add unique index team_org_id_uid" 2026-03-10T13:25:58.123 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.936174214Z level=info msg="Migration successfully executed" id="Add unique index team_org_id_uid" duration=539.659µs 2026-03-10T13:25:58.123 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.937019457Z level=info msg="Executing migration" id="Add OAuth ID token to user_auth" 2026-03-10T13:25:58.123 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.939167187Z level=info msg="Migration successfully executed" id="Add OAuth ID token to user_auth" duration=2.14683ms 2026-03-10T13:25:58.123 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.939917952Z level=info msg="Executing migration" id="add index user_auth_token.revoked_at" 2026-03-10T13:25:58.123 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.940427967Z level=info msg="Migration successfully executed" id="add index user_auth_token.revoked_at" duration=508.272µs 2026-03-10T13:25:58.123 
INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.941057355Z level=info msg="Executing migration" id="alter table short_url alter column created_by type to bigint" 2026-03-10T13:25:58.123 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.941118159Z level=info msg="Migration successfully executed" id="alter table short_url alter column created_by type to bigint" duration=60.553µs 2026-03-10T13:25:58.123 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.941778105Z level=info msg="Executing migration" id="add current_reason column related to current_state" 2026-03-10T13:25:58.123 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.945702961Z level=info msg="Migration successfully executed" id="add current_reason column related to current_state" duration=3.923075ms 2026-03-10T13:25:58.123 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.946786341Z level=info msg="Executing migration" id="add result_fingerprint column to alert_instance" 2026-03-10T13:25:58.123 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.948929361Z level=info msg="Migration successfully executed" id="add result_fingerprint column to alert_instance" duration=2.143061ms 2026-03-10T13:25:58.123 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.949549723Z level=info msg="Executing migration" id="add rule_group_idx column to alert_rule" 2026-03-10T13:25:58.123 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.951574011Z level=info msg="Migration successfully executed" id="add rule_group_idx column to alert_rule" duration=2.02481ms 2026-03-10T13:25:58.123 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.952134932Z level=info msg="Executing migration" id="add is_paused column to alert_rule table" 2026-03-10T13:25:58.123 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.954247878Z level=info msg="Migration successfully executed" id="add is_paused column to alert_rule table" duration=2.113436ms 2026-03-10T13:25:58.123 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.954903605Z level=info msg="Executing migration" id="fix is_paused column for alert_rule table" 2026-03-10T13:25:58.123 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.954960723Z level=info msg="Migration successfully executed" id="fix is_paused 
column for alert_rule table" duration=57.357µs 2026-03-10T13:25:58.124 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.955566806Z level=info msg="Executing migration" id="add rule_group_idx column to alert_rule_version" 2026-03-10T13:25:58.124 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.95759866Z level=info msg="Migration successfully executed" id="add rule_group_idx column to alert_rule_version" duration=2.031273ms 2026-03-10T13:25:58.124 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.958643465Z level=info msg="Executing migration" id="add is_paused column to alert_rule_versions table" 2026-03-10T13:25:58.124 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.960724622Z level=info msg="Migration successfully executed" id="add is_paused column to alert_rule_versions table" duration=2.080635ms 2026-03-10T13:25:58.124 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.961405256Z level=info msg="Executing migration" id="fix is_paused column for alert_rule_version table" 2026-03-10T13:25:58.124 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.961468865Z level=info msg="Migration successfully executed" id="fix is_paused column for alert_rule_version table" duration=63.82µs 2026-03-10T13:25:58.124 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.962068957Z level=info msg="Executing migration" id="add configuration_hash column to alert_configuration" 2026-03-10T13:25:58.124 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.964241384Z level=info msg="Migration successfully executed" id="add configuration_hash column to alert_configuration" duration=2.171565ms 2026-03-10T13:25:58.124 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.964897773Z level=info msg="Executing migration" id="add column send_alerts_to in ngalert_configuration" 2026-03-10T13:25:58.124 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.968265538Z level=info msg="Migration successfully executed" id="add column send_alerts_to in ngalert_configuration" duration=3.366081ms 2026-03-10T13:25:58.124 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.969022625Z level=info msg="Executing migration" id="create provenance_type table" 2026-03-10T13:25:58.124 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator 
t=2026-03-10T13:25:57.969702238Z level=info msg="Migration successfully executed" id="create provenance_type table" duration=668.681µs 2026-03-10T13:25:58.124 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.970495863Z level=info msg="Executing migration" id="add index to uniquify (record_key, record_type, org_id) columns" 2026-03-10T13:25:58.124 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.971087912Z level=info msg="Migration successfully executed" id="add index to uniquify (record_key, record_type, org_id) columns" duration=591.708µs 2026-03-10T13:25:58.124 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.971759749Z level=info msg="Executing migration" id="create alert_image table" 2026-03-10T13:25:58.124 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.972185827Z level=info msg="Migration successfully executed" id="create alert_image table" duration=426.189µs 2026-03-10T13:25:58.124 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.972950538Z level=info msg="Executing migration" id="add unique index on token to alert_image table" 2026-03-10T13:25:58.124 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.973429314Z level=info msg="Migration successfully executed" id="add unique index on token to alert_image table" duration=479.046µs 2026-03-10T13:25:58.124 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.975229294Z level=info msg="Executing migration" id="support longer URLs in alert_image table" 2026-03-10T13:25:58.124 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.975288184Z level=info msg="Migration successfully executed" id="support longer URLs in alert_image table" duration=59.251µs 2026-03-10T13:25:58.124 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.975988535Z level=info msg="Executing migration" id=create_alert_configuration_history_table 2026-03-10T13:25:58.124 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.976458755Z level=info msg="Migration successfully executed" id=create_alert_configuration_history_table duration=470.22µs 2026-03-10T13:25:58.124 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.977115544Z level=info msg="Executing migration" id="drop non-unique orgID index on alert_configuration" 2026-03-10T13:25:58.124 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: 
logger=migrator t=2026-03-10T13:25:57.977612586Z level=info msg="Migration successfully executed" id="drop non-unique orgID index on alert_configuration" duration=496.951µs 2026-03-10T13:25:58.124 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.978244608Z level=info msg="Executing migration" id="drop unique orgID index on alert_configuration if exists" 2026-03-10T13:25:58.124 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.978447558Z level=warn msg="Skipping migration: Already executed, but not recorded in migration log" id="drop unique orgID index on alert_configuration if exists" 2026-03-10T13:25:58.124 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.979010872Z level=info msg="Executing migration" id="extract alertmanager configuration history to separate table" 2026-03-10T13:25:58.124 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.979569829Z level=info msg="Migration successfully executed" id="extract alertmanager configuration history to separate table" duration=558.697µs 2026-03-10T13:25:58.124 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.980152991Z level=info msg="Executing migration" id="add unique index on orgID to alert_configuration" 2026-03-10T13:25:58.124 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.980668635Z level=info msg="Migration successfully executed" id="add unique index on orgID to alert_configuration" duration=515.485µs 2026-03-10T13:25:58.124 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.981227902Z level=info msg="Executing migration" id="add last_applied column to alert_configuration_history" 2026-03-10T13:25:58.124 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.984833944Z level=info msg="Migration successfully executed" id="add last_applied column to alert_configuration_history" duration=3.603035ms 2026-03-10T13:25:58.124 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.98608786Z level=info msg="Executing migration" id="increase max description length to 2048" 2026-03-10T13:25:58.124 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.986153052Z level=info msg="Migration successfully executed" id="increase max description length to 2048" duration=65.021µs 2026-03-10T13:25:58.125 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.986815793Z level=info msg="Executing migration" id="alter library_element model to mediumtext" 
2026-03-10T13:25:58.125 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.986879542Z level=info msg="Migration successfully executed" id="alter library_element model to mediumtext" duration=64.049µs 2026-03-10T13:25:58.125 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.987441895Z level=info msg="Executing migration" id="create secrets table" 2026-03-10T13:25:58.125 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.988054342Z level=info msg="Migration successfully executed" id="create secrets table" duration=610.513µs 2026-03-10T13:25:58.125 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:57.988765342Z level=info msg="Executing migration" id="rename data_keys name column to id" 2026-03-10T13:25:58.125 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.003775581Z level=info msg="Migration successfully executed" id="rename data_keys name column to id" duration=15.006181ms 2026-03-10T13:25:58.125 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.004739815Z level=info msg="Executing migration" id="add name column into data_keys" 2026-03-10T13:25:58.125 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.00737591Z level=info msg="Migration successfully executed" id="add name column into data_keys" duration=2.634741ms 2026-03-10T13:25:58.125 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.00817646Z level=info msg="Executing migration" id="copy data_keys id column values into name" 2026-03-10T13:25:58.125 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.008374029Z level=info msg="Migration successfully executed" id="copy data_keys id column values into name" duration=198.131µs 2026-03-10T13:25:58.125 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.009102683Z level=info msg="Executing migration" id="rename data_keys name column to label" 2026-03-10T13:25:58.125 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.023416156Z level=info msg="Migration successfully executed" id="rename data_keys name column to label" duration=14.30681ms 2026-03-10T13:25:58.125 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.024062477Z level=info msg="Executing migration" id="rename data_keys id column back to name" 2026-03-10T13:25:58.125 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 
vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.036582965Z level=info msg="Migration successfully executed" id="rename data_keys id column back to name" duration=12.520999ms 2026-03-10T13:25:58.125 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.038347037Z level=info msg="Executing migration" id="add column hidden to role table" 2026-03-10T13:25:58.125 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.041673754Z level=info msg="Migration successfully executed" id="add column hidden to role table" duration=3.324804ms 2026-03-10T13:25:58.125 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.042912594Z level=info msg="Executing migration" id="permission kind migration" 2026-03-10T13:25:58.125 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.045816208Z level=info msg="Migration successfully executed" id="permission kind migration" duration=2.900479ms 2026-03-10T13:25:58.125 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.046432272Z level=info msg="Executing migration" id="permission attribute migration" 2026-03-10T13:25:58.125 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.048566717Z level=info msg="Migration successfully executed" id="permission attribute migration" duration=2.134265ms 2026-03-10T13:25:58.125 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.049179023Z level=info msg="Executing migration" id="permission identifier migration" 2026-03-10T13:25:58.125 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.0525902Z level=info msg="Migration successfully executed" id="permission identifier migration" duration=3.409604ms 2026-03-10T13:25:58.125 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.053371121Z level=info msg="Executing migration" id="add permission identifier index" 2026-03-10T13:25:58.125 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.053836533Z level=info msg="Migration successfully executed" id="add permission identifier index" duration=465.532µs 2026-03-10T13:25:58.125 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.054680262Z level=info msg="Executing migration" id="add permission action scope role_id index" 2026-03-10T13:25:58.125 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator 
t=2026-03-10T13:25:58.055124574Z level=info msg="Migration successfully executed" id="add permission action scope role_id index" duration=444.313µs 2026-03-10T13:25:58.125 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.055832098Z level=info msg="Executing migration" id="remove permission role_id action scope index" 2026-03-10T13:25:58.125 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.056278774Z level=info msg="Migration successfully executed" id="remove permission role_id action scope index" duration=446.707µs 2026-03-10T13:25:58.125 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.057096945Z level=info msg="Executing migration" id="create query_history table v1" 2026-03-10T13:25:58.125 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.05755831Z level=info msg="Migration successfully executed" id="create query_history table v1" duration=461.284µs 2026-03-10T13:25:58.125 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.058138305Z level=info msg="Executing migration" id="add index query_history.org_id-created_by-datasource_uid" 2026-03-10T13:25:58.125 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.058649703Z level=info msg="Migration successfully executed" id="add index query_history.org_id-created_by-datasource_uid" duration=511.267µs 2026-03-10T13:25:58.125 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.059588289Z level=info msg="Executing migration" id="alter table query_history alter column created_by type to bigint" 2026-03-10T13:25:58.125 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.059672386Z level=info msg="Migration successfully executed" id="alter table query_history alter column created_by type to bigint" duration=84.608µs 2026-03-10T13:25:58.125 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.060278982Z level=info msg="Executing migration" id="rbac disabled migrator" 2026-03-10T13:25:58.125 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.060401732Z level=info msg="Migration successfully executed" id="rbac disabled migrator" duration=122.93µs 2026-03-10T13:25:58.125 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.061707206Z level=info msg="Executing migration" id="teams permissions migration" 2026-03-10T13:25:58.125 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: 
logger=migrator t=2026-03-10T13:25:58.06197109Z level=info msg="Migration successfully executed" id="teams permissions migration" duration=264.003µs 2026-03-10T13:25:58.125 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.062645042Z level=info msg="Executing migration" id="dashboard permissions" 2026-03-10T13:25:58.125 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.065080842Z level=info msg="Migration successfully executed" id="dashboard permissions" duration=2.4357ms 2026-03-10T13:25:58.125 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.065800178Z level=info msg="Executing migration" id="dashboard permissions uid scopes" 2026-03-10T13:25:58.125 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.067590008Z level=info msg="Migration successfully executed" id="dashboard permissions uid scopes" duration=1.78974ms 2026-03-10T13:25:58.125 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.068432375Z level=info msg="Executing migration" id="drop managed folder create actions" 2026-03-10T13:25:58.125 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.068586464Z level=info msg="Migration successfully executed" id="drop managed folder create actions" duration=153.126µs 2026-03-10T13:25:58.126 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.069562921Z level=info msg="Executing migration" id="alerting notification permissions" 2026-03-10T13:25:58.126 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.069827497Z level=info msg="Migration successfully executed" id="alerting notification permissions" duration=264.616µs 2026-03-10T13:25:58.126 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.070441637Z level=info msg="Executing migration" id="create query_history_star table v1" 2026-03-10T13:25:58.126 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.070917437Z level=info msg="Migration successfully executed" id="create query_history_star table v1" duration=475.74µs 2026-03-10T13:25:58.126 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.071825607Z level=info msg="Executing migration" id="add index query_history.user_id-query_uid" 2026-03-10T13:25:58.126 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.072399081Z level=info msg="Migration successfully executed" id="add index 
query_history.user_id-query_uid" duration=573.273µs 2026-03-10T13:25:58.126 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.07304506Z level=info msg="Executing migration" id="add column org_id in query_history_star" 2026-03-10T13:25:58.126 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.075758549Z level=info msg="Migration successfully executed" id="add column org_id in query_history_star" duration=2.713158ms 2026-03-10T13:25:58.126 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.076320181Z level=info msg="Executing migration" id="alter table query_history_star_mig column user_id type to bigint" 2026-03-10T13:25:58.126 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.076419897Z level=info msg="Migration successfully executed" id="alter table query_history_star_mig column user_id type to bigint" duration=100.177µs 2026-03-10T13:25:58.126 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.076998792Z level=info msg="Executing migration" id="create correlation table v1" 2026-03-10T13:25:58.126 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.077462138Z level=info msg="Migration successfully executed" id="create correlation table v1" duration=464.75µs 2026-03-10T13:25:58.126 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.078094051Z level=info msg="Executing migration" id="add index correlations.uid" 2026-03-10T13:25:58.126 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.078596672Z level=info msg="Migration successfully executed" id="add index correlations.uid" duration=502.541µs 2026-03-10T13:25:58.126 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.079159285Z level=info msg="Executing migration" id="add index correlations.source_uid" 2026-03-10T13:25:58.126 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.079652399Z level=info msg="Migration successfully executed" id="add index correlations.source_uid" duration=493.174µs 2026-03-10T13:25:58.126 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.080248765Z level=info msg="Executing migration" id="add correlation config column" 2026-03-10T13:25:58.126 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.082591421Z level=info msg="Migration successfully executed" id="add correlation config column" duration=2.342554ms 
2026-03-10T13:25:58.126 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.083179321Z level=info msg="Executing migration" id="drop index IDX_correlation_uid - v1" 2026-03-10T13:25:58.126 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.083642488Z level=info msg="Migration successfully executed" id="drop index IDX_correlation_uid - v1" duration=462.096µs 2026-03-10T13:25:58.126 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.084248673Z level=info msg="Executing migration" id="drop index IDX_correlation_source_uid - v1" 2026-03-10T13:25:58.126 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.084691511Z level=info msg="Migration successfully executed" id="drop index IDX_correlation_source_uid - v1" duration=442.839µs 2026-03-10T13:25:58.126 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.085338071Z level=info msg="Executing migration" id="Rename table correlation to correlation_tmp_qwerty - v1" 2026-03-10T13:25:58.126 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.092204356Z level=info msg="Migration successfully executed" id="Rename table correlation to correlation_tmp_qwerty - v1" duration=6.863749ms 2026-03-10T13:25:58.126 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.093044669Z level=info msg="Executing migration" id="create correlation v2" 2026-03-10T13:25:58.126 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.09365469Z level=info msg="Migration successfully executed" id="create correlation v2" duration=610.412µs 2026-03-10T13:25:58.126 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.094288707Z level=info msg="Executing migration" id="create index IDX_correlation_uid - v2" 2026-03-10T13:25:58.126 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.094757084Z level=info msg="Migration successfully executed" id="create index IDX_correlation_uid - v2" duration=468.437µs 2026-03-10T13:25:58.126 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.09549216Z level=info msg="Executing migration" id="create index IDX_correlation_source_uid - v2" 2026-03-10T13:25:58.126 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.095938335Z level=info msg="Migration successfully executed" id="create index IDX_correlation_source_uid - v2" duration=446.225µs 
2026-03-10T13:25:58.126 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.096576049Z level=info msg="Executing migration" id="create index IDX_correlation_org_id - v2" 2026-03-10T13:25:58.126 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.097054605Z level=info msg="Migration successfully executed" id="create index IDX_correlation_org_id - v2" duration=478.606µs 2026-03-10T13:25:58.126 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.098085865Z level=info msg="Executing migration" id="copy correlation v1 to v2" 2026-03-10T13:25:58.126 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.098285789Z level=info msg="Migration successfully executed" id="copy correlation v1 to v2" duration=199.965µs 2026-03-10T13:25:58.126 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.098926028Z level=info msg="Executing migration" id="drop correlation_tmp_qwerty" 2026-03-10T13:25:58.126 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.099410033Z level=info msg="Migration successfully executed" id="drop correlation_tmp_qwerty" duration=483.946µs 2026-03-10T13:25:58.126 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.100032599Z level=info msg="Executing migration" id="add provisioning column" 2026-03-10T13:25:58.126 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.102609292Z level=info msg="Migration successfully executed" id="add provisioning column" duration=2.576231ms 2026-03-10T13:25:58.126 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.103109739Z level=info msg="Executing migration" id="create entity_events table" 2026-03-10T13:25:58.126 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.103524346Z level=info msg="Migration successfully executed" id="create entity_events table" duration=414.797µs 2026-03-10T13:25:58.126 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.104084795Z level=info msg="Executing migration" id="create dashboard public config v1" 2026-03-10T13:25:58.126 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.104604588Z level=info msg="Migration successfully executed" id="create dashboard public config v1" duration=521.307µs 2026-03-10T13:25:58.126 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 
ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.105303757Z level=info msg="Executing migration" id="drop index UQE_dashboard_public_config_uid - v1" 2026-03-10T13:25:58.126 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.105531853Z level=warn msg="Skipping migration: Already executed, but not recorded in migration log" id="drop index UQE_dashboard_public_config_uid - v1" 2026-03-10T13:25:58.126 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.10610132Z level=info msg="Executing migration" id="drop index IDX_dashboard_public_config_org_id_dashboard_uid - v1" 2026-03-10T13:25:58.126 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.106330007Z level=warn msg="Skipping migration: Already executed, but not recorded in migration log" id="drop index IDX_dashboard_public_config_org_id_dashboard_uid - v1" 2026-03-10T13:25:58.126 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.107053181Z level=info msg="Executing migration" id="Drop old dashboard public config table" 2026-03-10T13:25:58.126 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.107483577Z level=info msg="Migration successfully executed" id="Drop old dashboard public config table" duration=430.394µs 2026-03-10T13:25:58.126 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.108096855Z level=info msg="Executing migration" id="recreate dashboard public config v1" 2026-03-10T13:25:58.127 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.108608062Z level=info msg="Migration successfully executed" id="recreate dashboard public config v1" duration=511.467µs 2026-03-10T13:25:58.127 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.109303104Z level=info msg="Executing migration" id="create index UQE_dashboard_public_config_uid - v1" 2026-03-10T13:25:58.127 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.10994772Z level=info msg="Migration successfully executed" id="create index UQE_dashboard_public_config_uid - v1" duration=644.075µs 2026-03-10T13:25:58.127 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.110701821Z level=info msg="Executing migration" id="create index IDX_dashboard_public_config_org_id_dashboard_uid - v1" 2026-03-10T13:25:58.127 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.111399327Z level=info msg="Migration successfully executed" id="create index 
IDX_dashboard_public_config_org_id_dashboard_uid - v1" duration=697.305µs 2026-03-10T13:25:58.127 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.11214369Z level=info msg="Executing migration" id="drop index UQE_dashboard_public_config_uid - v2" 2026-03-10T13:25:58.127 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.112798026Z level=info msg="Migration successfully executed" id="drop index UQE_dashboard_public_config_uid - v2" duration=654.115µs 2026-03-10T13:25:58.127 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.113419189Z level=info msg="Executing migration" id="drop index IDX_dashboard_public_config_org_id_dashboard_uid - v2" 2026-03-10T13:25:58.127 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.11403443Z level=info msg="Migration successfully executed" id="drop index IDX_dashboard_public_config_org_id_dashboard_uid - v2" duration=615.372µs 2026-03-10T13:25:58.127 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.114681371Z level=info msg="Executing migration" id="Drop public config table" 2026-03-10T13:25:58.127 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.115140831Z level=info msg="Migration successfully executed" id="Drop public config table" duration=459.661µs 2026-03-10T13:25:58.127 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.115698405Z level=info msg="Executing migration" id="Recreate dashboard public config v2" 2026-03-10T13:25:58.127 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.116303056Z level=info msg="Migration successfully executed" id="Recreate dashboard public config v2" duration=603.389µs 2026-03-10T13:25:58.127 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.116852656Z level=info msg="Executing migration" id="create index UQE_dashboard_public_config_uid - v2" 2026-03-10T13:25:58.127 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.117374232Z level=info msg="Migration successfully executed" id="create index UQE_dashboard_public_config_uid - v2" duration=521.276µs 2026-03-10T13:25:58.127 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.117920844Z level=info msg="Executing migration" id="create index IDX_dashboard_public_config_org_id_dashboard_uid - v2" 2026-03-10T13:25:58.127 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator 
t=2026-03-10T13:25:58.118422595Z level=info msg="Migration successfully executed" id="create index IDX_dashboard_public_config_org_id_dashboard_uid - v2" duration=501.439µs 2026-03-10T13:25:58.127 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.118897113Z level=info msg="Executing migration" id="create index UQE_dashboard_public_config_access_token - v2" 2026-03-10T13:25:58.127 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.119372843Z level=info msg="Migration successfully executed" id="create index UQE_dashboard_public_config_access_token - v2" duration=475.619µs 2026-03-10T13:25:58.127 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:57 vm08 ceph-mon[49535]: pgmap v11: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 0 B/s wr, 6 op/s 2026-03-10T13:25:58.127 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:57 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:58.127 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:57 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:58.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:57 vm00 ceph-mon[51670]: pgmap v11: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 0 B/s wr, 6 op/s 2026-03-10T13:25:58.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:57 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:58.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:57 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:58.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:57 vm00 ceph-mon[47364]: pgmap v11: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 0 B/s wr, 6 op/s 2026-03-10T13:25:58.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:57 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:58.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:57 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:58.391 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.120011309Z level=info msg="Executing migration" id="Rename table dashboard_public_config to dashboard_public - v2" 2026-03-10T13:25:58.391 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.137073728Z level=info msg="Migration successfully executed" id="Rename table dashboard_public_config to dashboard_public - v2" duration=17.057942ms 2026-03-10T13:25:58.391 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.138082446Z level=info msg="Executing migration" id="add annotations_enabled column" 2026-03-10T13:25:58.391 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.141132536Z level=info 
msg="Migration successfully executed" id="add annotations_enabled column" duration=3.03945ms 2026-03-10T13:25:58.391 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.14187749Z level=info msg="Executing migration" id="add time_selection_enabled column" 2026-03-10T13:25:58.391 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.144646714Z level=info msg="Migration successfully executed" id="add time_selection_enabled column" duration=2.768111ms 2026-03-10T13:25:58.391 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.145421965Z level=info msg="Executing migration" id="delete orphaned public dashboards" 2026-03-10T13:25:58.391 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.145638141Z level=info msg="Migration successfully executed" id="delete orphaned public dashboards" duration=216.486µs 2026-03-10T13:25:58.391 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.14637493Z level=info msg="Executing migration" id="add share column" 2026-03-10T13:25:58.391 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.148993011Z level=info msg="Migration successfully executed" id="add share column" duration=2.616587ms 2026-03-10T13:25:58.391 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.14972445Z level=info msg="Executing migration" id="backfill empty share column fields with default of public" 2026-03-10T13:25:58.391 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.149910659Z level=info msg="Migration successfully executed" id="backfill empty share column fields with default of public" duration=186.359µs 2026-03-10T13:25:58.391 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.150598867Z level=info msg="Executing migration" id="create file table" 2026-03-10T13:25:58.391 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.151121354Z level=info msg="Migration successfully executed" id="create file table" duration=512.649µs 2026-03-10T13:25:58.391 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.151843957Z level=info msg="Executing migration" id="file table idx: path natural pk" 2026-03-10T13:25:58.391 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.152430645Z level=info msg="Migration successfully executed" id="file table idx: path natural pk" duration=586.257µs 
2026-03-10T13:25:58.391 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.153101942Z level=info msg="Executing migration" id="file table idx: parent_folder_path_hash fast folder retrieval" 2026-03-10T13:25:58.391 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.153659186Z level=info msg="Migration successfully executed" id="file table idx: parent_folder_path_hash fast folder retrieval" duration=557.273µs 2026-03-10T13:25:58.391 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.154397267Z level=info msg="Executing migration" id="create file_meta table" 2026-03-10T13:25:58.391 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.155507315Z level=info msg="Migration successfully executed" id="create file_meta table" duration=496.108µs 2026-03-10T13:25:58.391 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.156259984Z level=info msg="Executing migration" id="file table idx: path key" 2026-03-10T13:25:58.391 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.156837255Z level=info msg="Migration successfully executed" id="file table idx: path key" duration=576.909µs 2026-03-10T13:25:58.391 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.15751339Z level=info msg="Executing migration" id="set path collation in file table" 2026-03-10T13:25:58.391 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.157607107Z level=info msg="Migration successfully executed" id="set path collation in file table" duration=94.237µs 2026-03-10T13:25:58.391 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.158208681Z level=info msg="Executing migration" id="migrate contents column to mediumblob for MySQL" 2026-03-10T13:25:58.391 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.158299932Z level=info msg="Migration successfully executed" id="migrate contents column to mediumblob for MySQL" duration=91.831µs 2026-03-10T13:25:58.391 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.158895828Z level=info msg="Executing migration" id="managed permissions migration" 2026-03-10T13:25:58.391 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.160629904Z level=info msg="Migration successfully executed" id="managed permissions migration" duration=1.734115ms 2026-03-10T13:25:58.391 
INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.161342087Z level=info msg="Executing migration" id="managed folder permissions alert actions migration" 2026-03-10T13:25:58.391 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.162108582Z level=info msg="Migration successfully executed" id="managed folder permissions alert actions migration" duration=766.595µs 2026-03-10T13:25:58.391 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.162679181Z level=info msg="Executing migration" id="RBAC action name migrator" 2026-03-10T13:25:58.391 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.163437971Z level=info msg="Migration successfully executed" id="RBAC action name migrator" duration=758.932µs 2026-03-10T13:25:58.391 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.164047822Z level=info msg="Executing migration" id="Add UID column to playlist" 2026-03-10T13:25:58.391 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.167302015Z level=info msg="Migration successfully executed" id="Add UID column to playlist" duration=3.253461ms 2026-03-10T13:25:58.391 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.167881218Z level=info msg="Executing migration" id="Update uid column values in playlist" 2026-03-10T13:25:58.391 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.168038703Z level=info msg="Migration successfully executed" id="Update uid column values in playlist" duration=157.675µs 2026-03-10T13:25:58.391 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.168630842Z level=info msg="Executing migration" id="Add index for uid in playlist" 2026-03-10T13:25:58.391 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.169120849Z level=info msg="Migration successfully executed" id="Add index for uid in playlist" duration=489.807µs 2026-03-10T13:25:58.391 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.169754486Z level=info msg="Executing migration" id="update group index for alert rules" 2026-03-10T13:25:58.391 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.169983985Z level=info msg="Migration successfully executed" id="update group index for alert rules" duration=229.809µs 2026-03-10T13:25:58.391 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 
ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.170541018Z level=info msg="Executing migration" id="managed folder permissions alert actions repeated migration" 2026-03-10T13:25:58.391 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.170988004Z level=info msg="Migration successfully executed" id="managed folder permissions alert actions repeated migration" duration=447.346µs 2026-03-10T13:25:58.392 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.171501165Z level=info msg="Executing migration" id="admin only folder/dashboard permission" 2026-03-10T13:25:58.392 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.17177621Z level=info msg="Migration successfully executed" id="admin only folder/dashboard permission" duration=275.125µs 2026-03-10T13:25:58.392 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.173173085Z level=info msg="Executing migration" id="add action column to seed_assignment" 2026-03-10T13:25:58.392 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.176800164Z level=info msg="Migration successfully executed" id="add action column to seed_assignment" duration=3.624164ms 2026-03-10T13:25:58.392 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.178117169Z level=info msg="Executing migration" id="add scope column to seed_assignment" 2026-03-10T13:25:58.392 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.181064137Z level=info msg="Migration successfully executed" id="add scope column to seed_assignment" duration=2.946897ms 2026-03-10T13:25:58.392 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.18164892Z level=info msg="Executing migration" id="remove unique index builtin_role_role_name before nullable update" 2026-03-10T13:25:58.392 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.18220398Z level=info msg="Migration successfully executed" id="remove unique index builtin_role_role_name before nullable update" duration=536.064µs 2026-03-10T13:25:58.392 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.182754931Z level=info msg="Executing migration" id="update seed_assignment role_name column to nullable" 2026-03-10T13:25:58.392 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.21281921Z level=info msg="Migration successfully executed" id="update seed_assignment role_name column to nullable" duration=30.05954ms 
2026-03-10T13:25:58.392 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.213817058Z level=info msg="Executing migration" id="add unique index builtin_role_name back" 2026-03-10T13:25:58.392 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.214474889Z level=info msg="Migration successfully executed" id="add unique index builtin_role_name back" duration=657.701µs 2026-03-10T13:25:58.392 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.215120879Z level=info msg="Executing migration" id="add unique index builtin_role_action_scope" 2026-03-10T13:25:58.392 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.215660528Z level=info msg="Migration successfully executed" id="add unique index builtin_role_action_scope" duration=539.529µs 2026-03-10T13:25:58.392 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.216406736Z level=info msg="Executing migration" id="add primary key to seed_assigment" 2026-03-10T13:25:58.392 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.22470465Z level=info msg="Migration successfully executed" id="add primary key to seed_assigment" duration=8.293916ms 2026-03-10T13:25:58.392 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.225742722Z level=info msg="Executing migration" id="add origin column to seed_assignment" 2026-03-10T13:25:58.392 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.228365081Z level=info msg="Migration successfully executed" id="add origin column to seed_assignment" duration=2.621867ms 2026-03-10T13:25:58.392 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.229038181Z level=info msg="Executing migration" id="add origin to plugin seed_assignment" 2026-03-10T13:25:58.392 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.229257291Z level=info msg="Migration successfully executed" id="add origin to plugin seed_assignment" duration=218.208µs 2026-03-10T13:25:58.392 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.229871461Z level=info msg="Executing migration" id="prevent seeding OnCall access" 2026-03-10T13:25:58.392 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.230033204Z level=info msg="Migration successfully executed" id="prevent seeding OnCall access" duration=161.983µs 2026-03-10T13:25:58.392 
INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.230700262Z level=info msg="Executing migration" id="managed folder permissions alert actions repeated fixed migration" 2026-03-10T13:25:58.392 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.231211951Z level=info msg="Migration successfully executed" id="managed folder permissions alert actions repeated fixed migration" duration=511.488µs 2026-03-10T13:25:58.392 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.231808396Z level=info msg="Executing migration" id="managed folder permissions library panel actions migration" 2026-03-10T13:25:58.392 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.232598466Z level=info msg="Migration successfully executed" id="managed folder permissions library panel actions migration" duration=789.91µs 2026-03-10T13:25:58.392 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.233133488Z level=info msg="Executing migration" id="migrate external alertmanagers to datsourcse" 2026-03-10T13:25:58.392 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.233333912Z level=info msg="Migration successfully executed" id="migrate external alertmanagers to datsourcse" duration=201.517µs 2026-03-10T13:25:58.392 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.233902688Z level=info msg="Executing migration" id="create folder table" 2026-03-10T13:25:58.392 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.234436547Z level=info msg="Migration successfully executed" id="create folder table" duration=532.677µs 2026-03-10T13:25:58.392 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.234966188Z level=info msg="Executing migration" id="Add index for parent_uid" 2026-03-10T13:25:58.392 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.235536435Z level=info msg="Migration successfully executed" id="Add index for parent_uid" duration=570.278µs 2026-03-10T13:25:58.392 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.236519696Z level=info msg="Executing migration" id="Add unique index for folder.uid and folder.org_id" 2026-03-10T13:25:58.392 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.237243101Z level=info msg="Migration successfully executed" id="Add unique index for folder.uid and folder.org_id" 
duration=724.686µs 2026-03-10T13:25:58.392 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.23783012Z level=info msg="Executing migration" id="Update folder title length" 2026-03-10T13:25:58.392 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.237904138Z level=info msg="Migration successfully executed" id="Update folder title length" duration=74.47µs 2026-03-10T13:25:58.392 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.238468565Z level=info msg="Executing migration" id="Add unique index for folder.title and folder.parent_uid" 2026-03-10T13:25:58.392 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.239009667Z level=info msg="Migration successfully executed" id="Add unique index for folder.title and folder.parent_uid" duration=541.192µs 2026-03-10T13:25:58.392 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.239616313Z level=info msg="Executing migration" id="Remove unique index for folder.title and folder.parent_uid" 2026-03-10T13:25:58.392 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.240128783Z level=info msg="Migration successfully executed" id="Remove unique index for folder.title and folder.parent_uid" duration=512.64µs 2026-03-10T13:25:58.392 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.242117114Z level=info msg="Executing migration" id="Add unique index for title, parent_uid, and org_id" 2026-03-10T13:25:58.392 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.243116074Z level=info msg="Migration successfully executed" id="Add unique index for title, parent_uid, and org_id" duration=999.19µs 2026-03-10T13:25:58.392 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.24795369Z level=info msg="Executing migration" id="Sync dashboard and folder table" 2026-03-10T13:25:58.392 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.248382663Z level=info msg="Migration successfully executed" id="Sync dashboard and folder table" duration=429.765µs 2026-03-10T13:25:58.392 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.249199763Z level=info msg="Executing migration" id="Remove ghost folders from the folder table" 2026-03-10T13:25:58.392 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.249435284Z level=info msg="Migration successfully executed" id="Remove ghost 
folders from the folder table" duration=235.47µs 2026-03-10T13:25:58.392 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.250071825Z level=info msg="Executing migration" id="Remove unique index UQE_folder_uid_org_id" 2026-03-10T13:25:58.392 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.250652893Z level=info msg="Migration successfully executed" id="Remove unique index UQE_folder_uid_org_id" duration=581.058µs 2026-03-10T13:25:58.392 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.255478497Z level=info msg="Executing migration" id="Add unique index UQE_folder_org_id_uid" 2026-03-10T13:25:58.392 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.256370997Z level=info msg="Migration successfully executed" id="Add unique index UQE_folder_org_id_uid" duration=894.415µs 2026-03-10T13:25:58.392 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.257069755Z level=info msg="Executing migration" id="Remove unique index UQE_folder_title_parent_uid_org_id" 2026-03-10T13:25:58.392 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.257668124Z level=info msg="Migration successfully executed" id="Remove unique index UQE_folder_title_parent_uid_org_id" duration=598.911µs 2026-03-10T13:25:58.392 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.258296401Z level=info msg="Executing migration" id="Add unique index UQE_folder_org_id_parent_uid_title" 2026-03-10T13:25:58.392 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.258968319Z level=info msg="Migration successfully executed" id="Add unique index UQE_folder_org_id_parent_uid_title" duration=671.858µs 2026-03-10T13:25:58.392 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.259516425Z level=info msg="Executing migration" id="Remove index IDX_folder_parent_uid_org_id" 2026-03-10T13:25:58.392 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.260035466Z level=info msg="Migration successfully executed" id="Remove index IDX_folder_parent_uid_org_id" duration=519.261µs 2026-03-10T13:25:58.392 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.26055002Z level=info msg="Executing migration" id="create anon_device table" 2026-03-10T13:25:58.392 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.260984733Z level=info msg="Migration successfully 
executed" id="create anon_device table" duration=434.784µs 2026-03-10T13:25:58.392 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.261544321Z level=info msg="Executing migration" id="add unique index anon_device.device_id" 2026-03-10T13:25:58.392 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.262110171Z level=info msg="Migration successfully executed" id="add unique index anon_device.device_id" duration=565.839µs 2026-03-10T13:25:58.393 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.262819829Z level=info msg="Executing migration" id="add index anon_device.updated_at" 2026-03-10T13:25:58.393 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.263375308Z level=info msg="Migration successfully executed" id="add index anon_device.updated_at" duration=555.349µs 2026-03-10T13:25:58.393 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.264107058Z level=info msg="Executing migration" id="create signing_key table" 2026-03-10T13:25:58.393 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.264552863Z level=info msg="Migration successfully executed" id="create signing_key table" duration=445.745µs 2026-03-10T13:25:58.393 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.26522975Z level=info msg="Executing migration" id="add unique index signing_key.key_id" 2026-03-10T13:25:58.393 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.265756796Z level=info msg="Migration successfully executed" id="add unique index signing_key.key_id" duration=526.865µs 2026-03-10T13:25:58.393 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.266445165Z level=info msg="Executing migration" id="set legacy alert migration status in kvstore" 2026-03-10T13:25:58.393 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.266937507Z level=info msg="Migration successfully executed" id="set legacy alert migration status in kvstore" duration=492.242µs 2026-03-10T13:25:58.393 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.267472859Z level=info msg="Executing migration" id="migrate record of created folders during legacy migration to kvstore" 2026-03-10T13:25:58.393 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.267711045Z level=info msg="Migration successfully executed" id="migrate record of created folders 
during legacy migration to kvstore" duration=238.466µs 2026-03-10T13:25:58.393 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.268214587Z level=info msg="Executing migration" id="Add folder_uid for dashboard" 2026-03-10T13:25:58.393 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.271000653Z level=info msg="Migration successfully executed" id="Add folder_uid for dashboard" duration=2.784312ms 2026-03-10T13:25:58.393 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.2715381Z level=info msg="Executing migration" id="Populate dashboard folder_uid column" 2026-03-10T13:25:58.393 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.272598705Z level=info msg="Migration successfully executed" id="Populate dashboard folder_uid column" duration=1.060656ms 2026-03-10T13:25:58.393 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.273625035Z level=info msg="Executing migration" id="Add unique index for dashboard_org_id_folder_uid_title" 2026-03-10T13:25:58.393 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.274268651Z level=info msg="Migration successfully executed" id="Add unique index for dashboard_org_id_folder_uid_title" duration=642.913µs 2026-03-10T13:25:58.393 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.274881808Z level=info msg="Executing migration" id="Delete unique index for dashboard_org_id_folder_id_title" 2026-03-10T13:25:58.393 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.275462955Z level=info msg="Migration successfully executed" id="Delete unique index for dashboard_org_id_folder_id_title" duration=581.007µs 2026-03-10T13:25:58.393 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.276038774Z level=info msg="Executing migration" id="Delete unique index for dashboard_org_id_folder_uid_title" 2026-03-10T13:25:58.393 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.276566673Z level=info msg="Migration successfully executed" id="Delete unique index for dashboard_org_id_folder_uid_title" duration=527.828µs 2026-03-10T13:25:58.393 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.277106123Z level=info msg="Executing migration" id="Add unique index for dashboard_org_id_folder_uid_title_is_folder" 2026-03-10T13:25:58.393 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator 
t=2026-03-10T13:25:58.277773772Z level=info msg="Migration successfully executed" id="Add unique index for dashboard_org_id_folder_uid_title_is_folder" duration=667.509µs 2026-03-10T13:25:58.393 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.27835545Z level=info msg="Executing migration" id="Restore index for dashboard_org_id_folder_id_title" 2026-03-10T13:25:58.393 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.279100175Z level=info msg="Migration successfully executed" id="Restore index for dashboard_org_id_folder_id_title" duration=744.554µs 2026-03-10T13:25:58.393 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.27971705Z level=info msg="Executing migration" id="create sso_setting table" 2026-03-10T13:25:58.393 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.280269653Z level=info msg="Migration successfully executed" id="create sso_setting table" duration=554.187µs 2026-03-10T13:25:58.393 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.280855811Z level=info msg="Executing migration" id="copy kvstore migration status to each org" 2026-03-10T13:25:58.393 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.281451366Z level=info msg="Migration successfully executed" id="copy kvstore migration status to each org" duration=595.545µs 2026-03-10T13:25:58.393 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.281961039Z level=info msg="Executing migration" id="add back entry for orgid=0 migrated status" 2026-03-10T13:25:58.393 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.282363243Z level=info msg="Migration successfully executed" id="add back entry for orgid=0 migrated status" duration=402.525µs 2026-03-10T13:25:58.393 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.282931858Z level=info msg="Executing migration" id="alter kv_store.value to longtext" 2026-03-10T13:25:58.393 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.28302907Z level=info msg="Migration successfully executed" id="alter kv_store.value to longtext" duration=97.652µs 2026-03-10T13:25:58.393 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.283783632Z level=info msg="Executing migration" id="add notification_settings column to alert_rule table" 2026-03-10T13:25:58.393 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: 
logger=migrator t=2026-03-10T13:25:58.286680113Z level=info msg="Migration successfully executed" id="add notification_settings column to alert_rule table" duration=2.895651ms 2026-03-10T13:25:58.393 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.293455076Z level=info msg="Executing migration" id="add notification_settings column to alert_rule_version table" 2026-03-10T13:25:58.393 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.298505601Z level=info msg="Migration successfully executed" id="add notification_settings column to alert_rule_version table" duration=5.049292ms 2026-03-10T13:25:58.393 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.301761796Z level=info msg="Executing migration" id="removing scope from alert.instances:read action migration" 2026-03-10T13:25:58.393 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.302186532Z level=info msg="Migration successfully executed" id="removing scope from alert.instances:read action migration" duration=423.233µs 2026-03-10T13:25:58.393 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=migrator t=2026-03-10T13:25:58.303702669Z level=info msg="migrations completed" performed=169 skipped=378 duration=428.335112ms 2026-03-10T13:25:58.393 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=sqlstore t=2026-03-10T13:25:58.30432816Z level=info msg="Created default organization" 2026-03-10T13:25:58.393 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=secrets t=2026-03-10T13:25:58.307149663Z level=info msg="Envelope encryption state" enabled=true currentprovider=secretKey.v1 2026-03-10T13:25:58.393 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=plugin.store t=2026-03-10T13:25:58.345140509Z level=info msg="Loading plugins..." 
2026-03-10T13:25:58.681 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=local.finder t=2026-03-10T13:25:58.433713392Z level=warn msg="Skipping finding plugins as directory does not exist" path=/usr/share/grafana/plugins-bundled 2026-03-10T13:25:58.681 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=plugin.store t=2026-03-10T13:25:58.4339736Z level=info msg="Plugins loaded" count=55 duration=88.833002ms 2026-03-10T13:25:58.681 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=query_data t=2026-03-10T13:25:58.435724217Z level=info msg="Query Service initialization" 2026-03-10T13:25:58.681 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=live.push_http t=2026-03-10T13:25:58.43740857Z level=info msg="Live Push Gateway initialization" 2026-03-10T13:25:58.681 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=ngalert.migration t=2026-03-10T13:25:58.439908901Z level=info msg=Starting 2026-03-10T13:25:58.681 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=ngalert t=2026-03-10T13:25:58.443749861Z level=warn msg="Unexpected number of rows updating alert configuration history" rows=0 org=1 hash=not-yet-calculated 2026-03-10T13:25:58.681 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=ngalert.state.manager t=2026-03-10T13:25:58.444443169Z level=info msg="Running in alternative execution of Error/NoData mode" 2026-03-10T13:25:58.681 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=infra.usagestats.collector t=2026-03-10T13:25:58.445286247Z level=info msg="registering usage stat providers" usageStatsProvidersLen=2 2026-03-10T13:25:58.681 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=provisioning.datasources t=2026-03-10T13:25:58.448019633Z level=info msg="deleted datasource based on configuration" name=Dashboard1 2026-03-10T13:25:58.681 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=provisioning.datasources t=2026-03-10T13:25:58.448296782Z level=info msg="inserting datasource from configuration" name=Dashboard1 uid=P43CA22E17D0F9596 2026-03-10T13:25:58.681 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=provisioning.alerting t=2026-03-10T13:25:58.458073785Z level=info msg="starting to provision alerting" 2026-03-10T13:25:58.681 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=provisioning.alerting t=2026-03-10T13:25:58.458244344Z level=info msg="finished to provision alerting" 2026-03-10T13:25:58.681 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=ngalert.state.manager t=2026-03-10T13:25:58.458367163Z level=info msg="Warming state cache for startup" 
2026-03-10T13:25:58.681 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=ngalert.state.manager t=2026-03-10T13:25:58.458556458Z level=info msg="State cache has been initialized" states=0 duration=188.012µs 2026-03-10T13:25:58.681 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=grafanaStorageLogger t=2026-03-10T13:25:58.459814283Z level=info msg="Storage starting" 2026-03-10T13:25:58.681 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=http.server t=2026-03-10T13:25:58.46042715Z level=info msg="HTTP Server TLS settings" MinTLSVersion=TLS1.2 configuredciphers=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA 2026-03-10T13:25:58.681 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=http.server t=2026-03-10T13:25:58.460599974Z level=info msg="HTTP Server Listen" address=[::]:3000 protocol=https subUrl= socket= 2026-03-10T13:25:58.681 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=ngalert.multiorg.alertmanager t=2026-03-10T13:25:58.460624068Z level=info msg="Starting MultiOrg Alertmanager" 2026-03-10T13:25:58.681 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=ngalert.scheduler t=2026-03-10T13:25:58.460630892Z level=info msg="Starting scheduler" tickInterval=10s maxAttempts=1 2026-03-10T13:25:58.681 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=ticker t=2026-03-10T13:25:58.460642774Z level=info msg=starting first_tick=2026-03-10T13:26:00Z 2026-03-10T13:25:58.681 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=provisioning.dashboard t=2026-03-10T13:25:58.469932693Z level=info msg="starting to provision dashboards" 2026-03-10T13:25:58.681 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=sqlstore.transactions t=2026-03-10T13:25:58.481471123Z level=info msg="Database locked, sleeping then retrying" error="database is locked" retry=0 code="database is locked" 2026-03-10T13:25:58.681 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=plugins.update.checker t=2026-03-10T13:25:58.594676688Z level=info msg="Update check succeeded" duration=135.835898ms 2026-03-10T13:25:58.934 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=provisioning.dashboard t=2026-03-10T13:25:58.679878732Z level=info msg="finished to provision dashboards" 2026-03-10T13:25:58.934 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: 
logger=grafana-apiserver t=2026-03-10T13:25:58.729075115Z level=info msg="Adding GroupVersion playlist.grafana.app v0alpha1 to ResourceManager" 2026-03-10T13:25:58.934 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:25:58 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=grafana-apiserver t=2026-03-10T13:25:58.729610037Z level=info msg="Adding GroupVersion featuretoggle.grafana.app v0alpha1 to ResourceManager" 2026-03-10T13:25:59.062 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:58 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:59.062 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:58 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:59.062 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:58 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:59.062 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:58 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:59.062 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:58 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:59.062 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:58 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:59.062 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:58 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:59.062 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:58 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:59.232 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:58 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:59.232 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:58 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:59.232 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:58 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:25:59.232 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:58 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:00.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:59 vm00 ceph-mon[47364]: pgmap v12: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 0 B/s wr, 6 op/s 2026-03-10T13:26:00.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:59 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:00.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:59 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:00.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:59 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:00.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:59 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:00.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:59 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:00.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:25:59 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' 
entity='mgr.y' 2026-03-10T13:26:00.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:59 vm00 ceph-mon[51670]: pgmap v12: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 0 B/s wr, 6 op/s 2026-03-10T13:26:00.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:59 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:00.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:59 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:00.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:59 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:00.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:59 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:00.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:59 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:00.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:25:59 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:00.272 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:59 vm08 ceph-mon[49535]: pgmap v12: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 0 B/s wr, 6 op/s 2026-03-10T13:26:00.272 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:59 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:00.272 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:59 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:00.272 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:59 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:00.272 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:59 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:00.272 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:59 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:00.272 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:25:59 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:01.198 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:26:00 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: [10/Mar/2026:13:26:00] ENGINE Bus STOPPING 2026-03-10T13:26:01.502 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:26:01 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: [10/Mar/2026:13:26:01] ENGINE HTTP Server cherrypy._cpwsgi_server.CPWSGIServer(('::', 9283)) shut down 2026-03-10T13:26:01.502 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:26:01 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: [10/Mar/2026:13:26:01] ENGINE Bus STOPPED 2026-03-10T13:26:01.503 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:26:01 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: [10/Mar/2026:13:26:01] ENGINE Bus STARTING 2026-03-10T13:26:01.503 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:26:01 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: [10/Mar/2026:13:26:01] ENGINE Serving on http://:::9283 2026-03-10T13:26:01.503 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:26:01 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: [10/Mar/2026:13:26:01] ENGINE Bus 
STARTED 2026-03-10T13:26:02.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[47364]: pgmap v13: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 511 B/s rd, 0 op/s 2026-03-10T13:26:02.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:02.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:02.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:26:02.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:26:02.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:02.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-10T13:26:02.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[47364]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-10T13:26:02.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://vm08.local:3000"}]: dispatch 2026-03-10T13:26:02.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[47364]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://vm08.local:3000"}]: dispatch 2026-03-10T13:26:02.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:02.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[47364]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:26:02.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:26:02.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:02.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:02.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:02.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:02.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:02.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:02.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:02.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:02.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:02.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:02.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:02.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:02.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:02.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:02.253 
INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[47364]: Upgrade: Finalizing container_image settings 2026-03-10T13:26:02.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:02.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]: dispatch 2026-03-10T13:26:02.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]': finished 2026-03-10T13:26:02.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:26:02.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon"}]': finished 2026-03-10T13:26:02.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]: dispatch 2026-03-10T13:26:02.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]': finished 2026-03-10T13:26:02.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd"}]: dispatch 2026-03-10T13:26:02.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mds"}]: dispatch 2026-03-10T13:26:02.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mds"}]': finished 2026-03-10T13:26:02.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]: dispatch 2026-03-10T13:26:02.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]: dispatch 2026-03-10T13:26:02.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]': finished 2026-03-10T13:26:02.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:26:02.253 
INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]: dispatch 2026-03-10T13:26:02.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]': finished 2026-03-10T13:26:02.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.iscsi"}]: dispatch 2026-03-10T13:26:02.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]: dispatch 2026-03-10T13:26:02.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]': finished 2026-03-10T13:26:02.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]: dispatch 2026-03-10T13:26:02.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]': finished 2026-03-10T13:26:02.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:26:02.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:26:02.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:26:02.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:26:02.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:26:02.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:26:02.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[47364]: Upgrade: Complete! 
2026-03-10T13:26:02.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]: dispatch 2026-03-10T13:26:02.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd='[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]': finished 2026-03-10T13:26:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[51670]: pgmap v13: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 511 B/s rd, 0 op/s 2026-03-10T13:26:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:26:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:26:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-10T13:26:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[51670]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-10T13:26:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://vm08.local:3000"}]: dispatch 2026-03-10T13:26:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[51670]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://vm08.local:3000"}]: dispatch 2026-03-10T13:26:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[51670]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:26:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:26:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:02.254 
INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[51670]: Upgrade: Finalizing container_image settings 2026-03-10T13:26:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]: dispatch 2026-03-10T13:26:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]': finished 2026-03-10T13:26:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:26:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon"}]': finished 2026-03-10T13:26:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]: dispatch 2026-03-10T13:26:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]': finished 2026-03-10T13:26:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd"}]: dispatch 2026-03-10T13:26:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mds"}]: dispatch 2026-03-10T13:26:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mds"}]': finished 2026-03-10T13:26:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]: dispatch 2026-03-10T13:26:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]: dispatch 2026-03-10T13:26:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]': finished 2026-03-10T13:26:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:26:02.254 
INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]: dispatch 2026-03-10T13:26:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]': finished 2026-03-10T13:26:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.iscsi"}]: dispatch 2026-03-10T13:26:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]: dispatch 2026-03-10T13:26:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]': finished 2026-03-10T13:26:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]: dispatch 2026-03-10T13:26:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]': finished 2026-03-10T13:26:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:26:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:26:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:26:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:26:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:26:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:26:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[51670]: Upgrade: Complete! 
2026-03-10T13:26:02.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]: dispatch 2026-03-10T13:26:02.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:01 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd='[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]': finished 2026-03-10T13:26:02.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:01 vm08 ceph-mon[49535]: pgmap v13: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 511 B/s rd, 0 op/s 2026-03-10T13:26:02.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:01 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:02.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:01 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:02.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:01 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:26:02.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:01 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:26:02.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:01 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:02.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:01 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-10T13:26:02.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:01 vm08 ceph-mon[49535]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-10T13:26:02.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:01 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://vm08.local:3000"}]: dispatch 2026-03-10T13:26:02.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:01 vm08 ceph-mon[49535]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://vm08.local:3000"}]: dispatch 2026-03-10T13:26:02.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:01 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:02.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:01 vm08 ceph-mon[49535]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:26:02.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:01 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:26:02.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:01 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:02.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:01 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:02.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:01 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:02.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:01 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:02.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:01 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:02.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:01 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:02.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:01 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:02.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:01 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:02.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:01 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:02.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:01 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:02.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:01 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:02.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:01 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:02.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:01 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:02.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:01 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:02.271 
INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:01 vm08 ceph-mon[49535]: Upgrade: Finalizing container_image settings 2026-03-10T13:26:02.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:01 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:02.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:01 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]: dispatch 2026-03-10T13:26:02.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:01 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]': finished 2026-03-10T13:26:02.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:01 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:26:02.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:01 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon"}]': finished 2026-03-10T13:26:02.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:01 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]: dispatch 2026-03-10T13:26:02.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:01 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]': finished 2026-03-10T13:26:02.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:01 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd"}]: dispatch 2026-03-10T13:26:02.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:01 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mds"}]: dispatch 2026-03-10T13:26:02.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:01 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mds"}]': finished 2026-03-10T13:26:02.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:01 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]: dispatch 2026-03-10T13:26:02.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:01 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]: dispatch 2026-03-10T13:26:02.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:01 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]': finished 2026-03-10T13:26:02.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:01 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:26:02.271 
INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:01 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]: dispatch 2026-03-10T13:26:02.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:01 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]': finished 2026-03-10T13:26:02.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:01 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.iscsi"}]: dispatch 2026-03-10T13:26:02.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:01 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]: dispatch 2026-03-10T13:26:02.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:01 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]': finished 2026-03-10T13:26:02.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:01 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]: dispatch 2026-03-10T13:26:02.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:01 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]': finished 2026-03-10T13:26:02.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:01 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:26:02.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:01 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:26:02.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:01 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:26:02.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:01 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:26:02.272 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:01 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:26:02.272 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:01 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:26:02.272 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:01 vm08 ceph-mon[49535]: Upgrade: Complete! 
2026-03-10T13:26:02.272 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:01 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]: dispatch 2026-03-10T13:26:02.272 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:01 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd='[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]': finished 2026-03-10T13:26:03.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:03 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:03.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:03 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:03.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:03 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:26:03.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:03 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:26:03.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:03 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:03.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:03 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:26:03.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:03 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:26:03.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:03 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:03.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:03 vm00 ceph-mon[47364]: pgmap v14: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 938 B/s rd, 0 op/s 2026-03-10T13:26:03.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:03 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:03.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:03 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:03.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:03 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:26:03.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:03 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:26:03.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:03 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:03.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:03 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:26:03.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:03 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": 
"auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:26:03.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:03 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:03.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:03 vm00 ceph-mon[51670]: pgmap v14: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 938 B/s rd, 0 op/s 2026-03-10T13:26:03.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:03 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:03.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:03 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:03.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:03 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:26:03.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:03 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:26:03.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:03 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:03.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:03 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:26:03.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:03 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:26:03.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:03 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:03.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:03 vm08 ceph-mon[49535]: pgmap v14: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 938 B/s rd, 0 op/s 2026-03-10T13:26:06.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:05 vm00 ceph-mon[47364]: pgmap v15: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:26:06.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:05 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:06.002 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:26:05 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:26:05] "GET /metrics HTTP/1.1" 200 37550 "" "Prometheus/2.51.0" 2026-03-10T13:26:06.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:05 vm00 ceph-mon[51670]: pgmap v15: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:26:06.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:05 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:06.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:05 vm08 ceph-mon[49535]: pgmap v15: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:26:06.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:05 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:07.252 
INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:26:06 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:26:06.982Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:26:07.252 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:26:06 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:26:06.983Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:26:08.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:07 vm00 ceph-mon[47364]: pgmap v16: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:26:08.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:07 vm00 ceph-mon[51670]: pgmap v16: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:26:08.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:07 vm08 ceph-mon[49535]: pgmap v16: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:26:10.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:09 vm00 ceph-mon[47364]: pgmap v17: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:26:10.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:09 vm00 ceph-mon[51670]: pgmap v17: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:26:10.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:09 vm08 ceph-mon[49535]: pgmap v17: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:26:10.598 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph versions | jq -e '"'"'.mgr | length == 1'"'"'' 2026-03-10T13:26:11.138 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:10 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:26:11.138 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:10 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:26:11.138 INFO:teuthology.orchestra.run.vm00.stdout:true 2026-03-10T13:26:11.189 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph versions | jq -e '"'"'.mgr | keys'"'"' | grep $sha1' 2026-03-10T13:26:11.270 
INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:10 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:26:11.714 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)" 2026-03-10T13:26:11.769 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph versions | jq -e '"'"'.overall | length == 2'"'"'' 2026-03-10T13:26:11.980 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:11 vm00 ceph-mon[51670]: pgmap v18: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:26:11.980 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:11 vm00 ceph-mon[51670]: from='client.25132 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:26:11.980 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:11 vm00 ceph-mon[51670]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:26:11.980 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:11 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/979600646' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:11.980 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:11 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/3302943624' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:11.980 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:11 vm00 ceph-mon[47364]: pgmap v18: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:26:11.980 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:11 vm00 ceph-mon[47364]: from='client.25132 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:26:11.980 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:11 vm00 ceph-mon[47364]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:26:11.980 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:11 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/979600646' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:11.980 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:11 vm00 ceph-mon[47364]: from='client.? 
192.168.123.100:0/3302943624' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:12.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:11 vm08 ceph-mon[49535]: pgmap v18: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:26:12.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:11 vm08 ceph-mon[49535]: from='client.25132 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:26:12.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:11 vm08 ceph-mon[49535]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:26:12.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:11 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/979600646' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:12.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:11 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/3302943624' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:12.340 INFO:teuthology.orchestra.run.vm00.stdout:true 2026-03-10T13:26:12.408 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch upgrade check quay.ceph.io/ceph-ci/ceph:$sha1 | jq -e '"'"'.up_to_date | length == 2'"'"'' 2026-03-10T13:26:13.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:12 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/621543116' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:13.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:12 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/621543116' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:13.268 INFO:teuthology.orchestra.run.vm00.stdout:true 2026-03-10T13:26:13.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:12 vm08 ceph-mon[49535]: from='client.? 
192.168.123.100:0/621543116' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:13.309 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch upgrade status' 2026-03-10T13:26:13.819 INFO:teuthology.orchestra.run.vm00.stdout:{ 2026-03-10T13:26:13.819 INFO:teuthology.orchestra.run.vm00.stdout: "target_image": null, 2026-03-10T13:26:13.819 INFO:teuthology.orchestra.run.vm00.stdout: "in_progress": false, 2026-03-10T13:26:13.819 INFO:teuthology.orchestra.run.vm00.stdout: "which": "", 2026-03-10T13:26:13.819 INFO:teuthology.orchestra.run.vm00.stdout: "services_complete": [], 2026-03-10T13:26:13.819 INFO:teuthology.orchestra.run.vm00.stdout: "progress": null, 2026-03-10T13:26:13.819 INFO:teuthology.orchestra.run.vm00.stdout: "message": "", 2026-03-10T13:26:13.819 INFO:teuthology.orchestra.run.vm00.stdout: "is_paused": false 2026-03-10T13:26:13.820 INFO:teuthology.orchestra.run.vm00.stdout:} 2026-03-10T13:26:13.987 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph health detail' 2026-03-10T13:26:14.134 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:13 vm00 ceph-mon[47364]: pgmap v19: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:26:14.134 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:13 vm00 ceph-mon[47364]: from='client.25153 -' entity='client.admin' cmd=[{"prefix": "orch upgrade check", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:26:14.134 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:13 vm00 ceph-mon[51670]: pgmap v19: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:26:14.135 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:13 vm00 ceph-mon[51670]: from='client.25153 -' entity='client.admin' cmd=[{"prefix": "orch upgrade check", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:26:14.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:13 vm08 ceph-mon[49535]: pgmap v19: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:26:14.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:13 vm08 ceph-mon[49535]: from='client.25153 -' entity='client.admin' cmd=[{"prefix": "orch upgrade check", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:26:14.516 INFO:teuthology.orchestra.run.vm00.stdout:HEALTH_OK 2026-03-10T13:26:14.597 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types mon --hosts $(ceph orch ps | grep mgr.x | awk '"'"'{print $2}'"'"')' 
2026-03-10T13:26:15.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:14 vm00 ceph-mon[47364]: from='client.25156 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:26:15.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:14 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/136301179' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T13:26:15.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:14 vm00 ceph-mon[51670]: from='client.25156 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:26:15.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:14 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/136301179' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T13:26:15.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:14 vm08 ceph-mon[49535]: from='client.25156 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:26:15.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:14 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/136301179' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T13:26:15.654 INFO:teuthology.orchestra.run.vm00.stdout:Initiating upgrade to quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:26:15.745 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'while ceph orch upgrade status | jq '"'"'.in_progress'"'"' | grep true && ! 
ceph orch upgrade status | jq '"'"'.message'"'"' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done' 2026-03-10T13:26:15.773 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:26:15 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:26:15] "GET /metrics HTTP/1.1" 200 37547 "" "Prometheus/2.51.0" 2026-03-10T13:26:16.024 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:15 vm00 ceph-mon[47364]: pgmap v20: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:26:16.025 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:15 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:16.025 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:15 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:26:16.025 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:15 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:26:16.025 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:15 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:16.025 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:15 vm00 ceph-mon[51670]: pgmap v20: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:26:16.025 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:15 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:16.025 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:15 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:26:16.025 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:15 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:26:16.025 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:15 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:16.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:15 vm08 ceph-mon[49535]: pgmap v20: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:26:16.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:15 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:16.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:15 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:26:16.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:15 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:26:16.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:15 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:16.314 INFO:teuthology.orchestra.run.vm00.stdout:true 2026-03-10T13:26:16.694 INFO:teuthology.orchestra.run.vm00.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 
2026-03-10T13:26:16.694 INFO:teuthology.orchestra.run.vm00.stdout:alertmanager.a vm00 *:9093,9094 running (12m) 17s ago 19m 26.4M - 0.25.0 c8568f914cd2 12fde3cf83cb 2026-03-10T13:26:16.694 INFO:teuthology.orchestra.run.vm00.stdout:grafana.a vm08 *:3000 running (18s) 14s ago 19m 72.7M - 10.4.0 c8b91775d855 960e32589e98 2026-03-10T13:26:16.694 INFO:teuthology.orchestra.run.vm00.stdout:iscsi.foo.vm00.dezodo vm00 running (26s) 17s ago 18m 43.0M - 3.5 e1d6a67b021e 630bf6d4e7f3 2026-03-10T13:26:16.695 INFO:teuthology.orchestra.run.vm00.stdout:mgr.x vm08 *:8443,9283,8765 running (30s) 14s ago 20m 488M - 19.2.3-678-ge911bdeb 654f31e6858e 31b91eebc856 2026-03-10T13:26:16.695 INFO:teuthology.orchestra.run.vm00.stdout:mgr.y vm00 *:8443,9283,8765 running (7m) 17s ago 21m 556M - 19.2.3-678-ge911bdeb 654f31e6858e 5bc576d4d32b 2026-03-10T13:26:16.695 INFO:teuthology.orchestra.run.vm00.stdout:mon.a vm00 running (21m) 17s ago 21m 77.3M 2048M 17.2.0 e1d6a67b021e f0e3f322471c 2026-03-10T13:26:16.695 INFO:teuthology.orchestra.run.vm00.stdout:mon.b vm08 running (21m) 14s ago 21m 57.2M 2048M 17.2.0 e1d6a67b021e d3c1458bc898 2026-03-10T13:26:16.695 INFO:teuthology.orchestra.run.vm00.stdout:mon.c vm00 running (21m) 17s ago 21m 56.2M 2048M 17.2.0 e1d6a67b021e d00b7fd44c23 2026-03-10T13:26:16.695 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.a vm00 *:9100 running (12m) 17s ago 19m 10.4M - 1.7.0 72c9c2088986 bcf883401619 2026-03-10T13:26:16.695 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.b vm08 *:9100 running (12m) 14s ago 19m 10.0M - 1.7.0 72c9c2088986 4ac83f03f818 2026-03-10T13:26:16.695 INFO:teuthology.orchestra.run.vm00.stdout:osd.0 vm00 running (20m) 17s ago 20m 52.9M 4096M 17.2.0 e1d6a67b021e 2919c7073fa7 2026-03-10T13:26:16.695 INFO:teuthology.orchestra.run.vm00.stdout:osd.1 vm00 running (20m) 17s ago 20m 57.2M 4096M 17.2.0 e1d6a67b021e 647927dc41ea 2026-03-10T13:26:16.695 INFO:teuthology.orchestra.run.vm00.stdout:osd.2 vm00 running (20m) 17s ago 20m 55.1M 4096M 17.2.0 e1d6a67b021e 1e417e82c2b9 2026-03-10T13:26:16.695 INFO:teuthology.orchestra.run.vm00.stdout:osd.3 vm00 running (20m) 17s ago 20m 53.3M 4096M 17.2.0 e1d6a67b021e 98bb6d7ea69f 2026-03-10T13:26:16.695 INFO:teuthology.orchestra.run.vm00.stdout:osd.4 vm08 running (20m) 14s ago 20m 53.8M 4096M 17.2.0 e1d6a67b021e e349440ca776 2026-03-10T13:26:16.695 INFO:teuthology.orchestra.run.vm00.stdout:osd.5 vm08 running (20m) 14s ago 20m 56.3M 4096M 17.2.0 e1d6a67b021e 9a6a2a67cc99 2026-03-10T13:26:16.695 INFO:teuthology.orchestra.run.vm00.stdout:osd.6 vm08 running (19m) 14s ago 19m 52.1M 4096M 17.2.0 e1d6a67b021e c27676916d52 2026-03-10T13:26:16.695 INFO:teuthology.orchestra.run.vm00.stdout:osd.7 vm08 running (19m) 14s ago 19m 53.9M 4096M 17.2.0 e1d6a67b021e bf67951990a5 2026-03-10T13:26:16.695 INFO:teuthology.orchestra.run.vm00.stdout:prometheus.a vm08 *:9095 running (31s) 14s ago 19m 46.8M - 2.51.0 1d3b7f56885b 3f9b2d0821c9 2026-03-10T13:26:16.695 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm00.tvlvzo vm00 *:8000 running (19m) 17s ago 19m 96.8M - 17.2.0 e1d6a67b021e be24eac16807 2026-03-10T13:26:16.695 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm08.ljayps vm08 *:8000 running (19m) 14s ago 19m 93.7M - 17.2.0 e1d6a67b021e 67b525427823 2026-03-10T13:26:16.946 INFO:teuthology.orchestra.run.vm00.stdout:{ 2026-03-10T13:26:16.946 INFO:teuthology.orchestra.run.vm00.stdout: "mon": { 2026-03-10T13:26:16.946 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 3 
2026-03-10T13:26:16.946 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T13:26:16.946 INFO:teuthology.orchestra.run.vm00.stdout: "mgr": { 2026-03-10T13:26:16.946 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-10T13:26:16.946 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T13:26:16.946 INFO:teuthology.orchestra.run.vm00.stdout: "osd": { 2026-03-10T13:26:16.946 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 8 2026-03-10T13:26:16.946 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T13:26:16.946 INFO:teuthology.orchestra.run.vm00.stdout: "mds": {}, 2026-03-10T13:26:16.946 INFO:teuthology.orchestra.run.vm00.stdout: "rgw": { 2026-03-10T13:26:16.946 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 2 2026-03-10T13:26:16.947 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T13:26:16.947 INFO:teuthology.orchestra.run.vm00.stdout: "overall": { 2026-03-10T13:26:16.947 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 13, 2026-03-10T13:26:16.947 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-10T13:26:16.947 INFO:teuthology.orchestra.run.vm00.stdout: } 2026-03-10T13:26:16.947 INFO:teuthology.orchestra.run.vm00.stdout:} 2026-03-10T13:26:17.163 INFO:teuthology.orchestra.run.vm00.stdout:{ 2026-03-10T13:26:17.164 INFO:teuthology.orchestra.run.vm00.stdout: "target_image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", 2026-03-10T13:26:17.164 INFO:teuthology.orchestra.run.vm00.stdout: "in_progress": true, 2026-03-10T13:26:17.164 INFO:teuthology.orchestra.run.vm00.stdout: "which": "Upgrading daemons of type(s) mon on host(s) vm08", 2026-03-10T13:26:17.164 INFO:teuthology.orchestra.run.vm00.stdout: "services_complete": [], 2026-03-10T13:26:17.164 INFO:teuthology.orchestra.run.vm00.stdout: "progress": "0/1 daemons upgraded", 2026-03-10T13:26:17.164 INFO:teuthology.orchestra.run.vm00.stdout: "message": "Currently upgrading mon daemons", 2026-03-10T13:26:17.164 INFO:teuthology.orchestra.run.vm00.stdout: "is_paused": false 2026-03-10T13:26:17.164 INFO:teuthology.orchestra.run.vm00.stdout:} 2026-03-10T13:26:17.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:17 vm00 ceph-mon[47364]: from='client.25165 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:26:17.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:17 vm00 ceph-mon[47364]: from='client.15297 -' entity='client.admin' cmd=[{"prefix": "orch upgrade start", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "daemon_types": "mon", "hosts": "vm08", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:26:17.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:17 vm00 ceph-mon[47364]: Upgrade: Started with target quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:26:17.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:17 vm00 ceph-mon[47364]: Upgrade: First pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:26:17.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:17 vm00 ceph-mon[47364]: from='mgr.24955 
192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:17.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:17 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:26:17.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:17 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:17.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:17 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:17.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:17 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "quorum_status"}]: dispatch 2026-03-10T13:26:17.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:17 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "mon ok-to-stop", "ids": ["b"]}]: dispatch 2026-03-10T13:26:17.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:17 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:17.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:17 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-10T13:26:17.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:17 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-10T13:26:17.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:17 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:26:17.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:17 vm00 ceph-mon[47364]: from='client.? 
192.168.123.100:0/1691507177' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:17.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:17 vm00 ceph-mon[51670]: from='client.25165 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:26:17.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:17 vm00 ceph-mon[51670]: from='client.15297 -' entity='client.admin' cmd=[{"prefix": "orch upgrade start", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "daemon_types": "mon", "hosts": "vm08", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:26:17.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:17 vm00 ceph-mon[51670]: Upgrade: Started with target quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:26:17.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:17 vm00 ceph-mon[51670]: Upgrade: First pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:26:17.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:17 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:17.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:17 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:26:17.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:17 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:17.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:17 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:17.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:17 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "quorum_status"}]: dispatch 2026-03-10T13:26:17.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:17 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "mon ok-to-stop", "ids": ["b"]}]: dispatch 2026-03-10T13:26:17.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:17 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:17.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:17 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-10T13:26:17.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:17 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-10T13:26:17.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:17 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:26:17.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:17 vm00 ceph-mon[51670]: from='client.? 
192.168.123.100:0/1691507177' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:17.253 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:26:16 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:26:16.983Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:26:17.253 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:26:16 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:26:16.984Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:26:17.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[49535]: from='client.25165 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:26:17.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[49535]: from='client.15297 -' entity='client.admin' cmd=[{"prefix": "orch upgrade start", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "daemon_types": "mon", "hosts": "vm08", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:26:17.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[49535]: Upgrade: Started with target quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:26:17.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[49535]: Upgrade: First pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:26:17.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:17.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:26:17.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:17.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:17.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "quorum_status"}]: dispatch 2026-03-10T13:26:17.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "mon ok-to-stop", "ids": ["b"]}]: dispatch 2026-03-10T13:26:17.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:17.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "auth get", 
"entity": "mon."}]: dispatch 2026-03-10T13:26:17.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-10T13:26:17.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[49535]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:26:17.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[49535]: from='client.? 192.168.123.100:0/1691507177' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:17.600 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 systemd[1]: Stopping Ceph mon.b for 98a3dada-1c81-11f1-89c9-d57c120f78d5... 2026-03-10T13:26:17.600 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mon-b[49531]: 2026-03-10T13:26:17.395+0000 7f107acd5700 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-mon -n mon.b -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false (PID: 1) UID: 0 2026-03-10T13:26:17.600 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mon-b[49531]: 2026-03-10T13:26:17.395+0000 7f107acd5700 -1 mon.b@2(peon) e3 *** Got Signal Terminated *** 2026-03-10T13:26:17.600 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 podman[82515]: 2026-03-10 13:26:17.447026715 +0000 UTC m=+0.065018202 container died d3c1458bc8985130344461c3e860500a878d029498badd5b16f011e74ccc1da6 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mon-b, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, name=centos-stream, release=754, vendor=Red Hat, Inc., io.openshift.expose-services=, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, vcs-type=git, CEPH_POINT_RELEASE=-17.2.0, distribution-scope=public, build-date=2022-05-03T08:36:31.336870, ceph=True, RELEASE=HEAD, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, GIT_REPO=https://github.com/ceph/ceph-container.git, com.redhat.component=centos-stream-container, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, GIT_BRANCH=HEAD, architecture=x86_64, io.openshift.tags=base centos centos-stream, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, io.buildah.version=1.19.8, maintainer=Guillaume Abrioux , com.redhat.license_terms=https://centos.org/legal/licensing-policy/, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., io.k8s.display-name=CentOS Stream 8, version=8, GIT_CLEAN=True) 2026-03-10T13:26:17.601 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 podman[82515]: 2026-03-10 13:26:17.463131231 +0000 UTC m=+0.081122720 container remove d3c1458bc8985130344461c3e860500a878d029498badd5b16f011e74ccc1da6 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mon-b, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, vcs-type=git, CEPH_POINT_RELEASE=-17.2.0, architecture=x86_64, distribution-scope=public, maintainer=Guillaume Abrioux , io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.tags=base centos centos-stream, io.buildah.version=1.19.8, name=centos-stream, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, vendor=Red Hat, Inc., build-date=2022-05-03T08:36:31.336870, com.redhat.component=centos-stream-container, io.k8s.display-name=CentOS Stream 8, version=8, GIT_BRANCH=HEAD, io.openshift.expose-services=, ceph=True, GIT_REPO=https://github.com/ceph/ceph-container.git, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, release=754, GIT_CLEAN=True, RELEASE=HEAD) 2026-03-10T13:26:17.601 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 bash[82515]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mon-b 2026-03-10T13:26:17.601 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 systemd[1]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@mon.b.service: Deactivated successfully. 2026-03-10T13:26:17.601 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 systemd[1]: Stopped Ceph mon.b for 98a3dada-1c81-11f1-89c9-d57c120f78d5. 2026-03-10T13:26:17.601 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 systemd[1]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@mon.b.service: Consumed 12.242s CPU time. 2026-03-10T13:26:18.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 systemd[1]: Starting Ceph mon.b for 98a3dada-1c81-11f1-89c9-d57c120f78d5... 
2026-03-10T13:26:18.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 podman[82624]: 2026-03-10 13:26:17.810393219 +0000 UTC m=+0.016992461 container create 8cceb678a9ee024d11000658e9a9e5d7573fbf291524bf1611b9aae18bc056e5 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mon-b, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20260223, ceph=True, CEPH_REF=squid, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df) 2026-03-10T13:26:18.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 podman[82624]: 2026-03-10 13:26:17.843159965 +0000 UTC m=+0.049759198 container init 8cceb678a9ee024d11000658e9a9e5d7573fbf291524bf1611b9aae18bc056e5 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mon-b, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/) 2026-03-10T13:26:18.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 podman[82624]: 2026-03-10 13:26:17.845445444 +0000 UTC m=+0.052044686 container start 8cceb678a9ee024d11000658e9a9e5d7573fbf291524bf1611b9aae18bc056e5 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mon-b, org.label-schema.build-date=20260223, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.vendor=CentOS) 2026-03-10T13:26:18.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 bash[82624]: 8cceb678a9ee024d11000658e9a9e5d7573fbf291524bf1611b9aae18bc056e5 2026-03-10T13:26:18.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 podman[82624]: 2026-03-10 13:26:17.80321965 +0000 UTC m=+0.009818902 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:26:18.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 systemd[1]: Started 
Ceph mon.b for 98a3dada-1c81-11f1-89c9-d57c120f78d5. 2026-03-10T13:26:18.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: set uid:gid to 167:167 (ceph:ceph) 2026-03-10T13:26:18.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable), process ceph-mon, pid 2 2026-03-10T13:26:18.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: pidfile_write: ignore empty --pid-file 2026-03-10T13:26:18.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: load: jerasure load: lrc 2026-03-10T13:26:18.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: RocksDB version: 7.9.2 2026-03-10T13:26:18.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Git sha 0 2026-03-10T13:26:18.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Compile date 2026-02-25 18:11:04 2026-03-10T13:26:18.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: DB SUMMARY 2026-03-10T13:26:18.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: DB Session ID: CJCG8FCDTSWS5IJAOKPN 2026-03-10T13:26:18.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: CURRENT file: CURRENT 2026-03-10T13:26:18.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: IDENTITY file: IDENTITY 2026-03-10T13:26:18.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: MANIFEST file: MANIFEST-000009 size: 2068 Bytes 2026-03-10T13:26:18.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: SST files in /var/lib/ceph/mon/ceph-b/store.db dir, Total Num: 1, files: 000042.sst 2026-03-10T13:26:18.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Write Ahead Log file in /var/lib/ceph/mon/ceph-b/store.db: 000040.log size: 52485 ; 2026-03-10T13:26:18.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.error_if_exists: 0 2026-03-10T13:26:18.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.create_if_missing: 0 2026-03-10T13:26:18.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.paranoid_checks: 1 2026-03-10T13:26:18.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.flush_verify_memtable_count: 1 2026-03-10T13:26:18.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.track_and_verify_wals_in_manifest: 0 2026-03-10T13:26:18.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.verify_sst_unique_id_in_manifest: 1 2026-03-10T13:26:18.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.env: 0x556326230dc0 2026-03-10T13:26:18.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.fs: PosixFileSystem 2026-03-10T13:26:18.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.info_log: 0x55632814c5c0 2026-03-10T13:26:18.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: 
Options.max_file_opening_threads: 16 2026-03-10T13:26:18.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.statistics: (nil) 2026-03-10T13:26:18.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.use_fsync: 0 2026-03-10T13:26:18.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.max_log_file_size: 0 2026-03-10T13:26:18.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.max_manifest_file_size: 1073741824 2026-03-10T13:26:18.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.log_file_time_to_roll: 0 2026-03-10T13:26:18.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.keep_log_file_num: 1000 2026-03-10T13:26:18.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.recycle_log_file_num: 0 2026-03-10T13:26:18.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.allow_fallocate: 1 2026-03-10T13:26:18.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.allow_mmap_reads: 0 2026-03-10T13:26:18.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.allow_mmap_writes: 0 2026-03-10T13:26:18.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.use_direct_reads: 0 2026-03-10T13:26:18.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.use_direct_io_for_flush_and_compaction: 0 2026-03-10T13:26:18.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.create_missing_column_families: 0 2026-03-10T13:26:18.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.db_log_dir: 2026-03-10T13:26:18.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.wal_dir: 2026-03-10T13:26:18.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.table_cache_numshardbits: 6 2026-03-10T13:26:18.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.WAL_ttl_seconds: 0 2026-03-10T13:26:18.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.WAL_size_limit_MB: 0 2026-03-10T13:26:18.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.max_write_batch_group_size_bytes: 1048576 2026-03-10T13:26:18.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.manifest_preallocation_size: 4194304 2026-03-10T13:26:18.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.is_fd_close_on_exec: 1 2026-03-10T13:26:18.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.advise_random_on_open: 1 2026-03-10T13:26:18.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.db_write_buffer_size: 0 2026-03-10T13:26:18.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.write_buffer_manager: 0x556328151900 2026-03-10T13:26:18.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 
ceph-mon[82639]: rocksdb: Options.access_hint_on_compaction_start: 1 2026-03-10T13:26:18.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.random_access_max_buffer_size: 1048576 2026-03-10T13:26:18.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.use_adaptive_mutex: 0 2026-03-10T13:26:18.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.rate_limiter: (nil) 2026-03-10T13:26:18.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.sst_file_manager.rate_bytes_per_sec: 0 2026-03-10T13:26:18.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.wal_recovery_mode: 2 2026-03-10T13:26:18.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.enable_thread_tracking: 0 2026-03-10T13:26:18.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.enable_pipelined_write: 0 2026-03-10T13:26:18.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.unordered_write: 0 2026-03-10T13:26:18.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.allow_concurrent_memtable_write: 1 2026-03-10T13:26:18.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.enable_write_thread_adaptive_yield: 1 2026-03-10T13:26:18.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.write_thread_max_yield_usec: 100 2026-03-10T13:26:18.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.write_thread_slow_yield_usec: 3 2026-03-10T13:26:18.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.row_cache: None 2026-03-10T13:26:18.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.wal_filter: None 2026-03-10T13:26:18.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.avoid_flush_during_recovery: 0 2026-03-10T13:26:18.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.allow_ingest_behind: 0 2026-03-10T13:26:18.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.two_write_queues: 0 2026-03-10T13:26:18.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.manual_wal_flush: 0 2026-03-10T13:26:18.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.wal_compression: 0 2026-03-10T13:26:18.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.atomic_flush: 0 2026-03-10T13:26:18.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.avoid_unnecessary_blocking_io: 0 2026-03-10T13:26:18.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.persist_stats_to_disk: 0 2026-03-10T13:26:18.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.write_dbid_to_manifest: 0 2026-03-10T13:26:18.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.log_readahead_size: 0 2026-03-10T13:26:18.022 
INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.file_checksum_gen_factory: Unknown 2026-03-10T13:26:18.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.best_efforts_recovery: 0 2026-03-10T13:26:18.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.max_bgerror_resume_count: 2147483647 2026-03-10T13:26:18.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.bgerror_resume_retry_interval: 1000000 2026-03-10T13:26:18.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.allow_data_in_errors: 0 2026-03-10T13:26:18.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.db_host_id: __hostname__ 2026-03-10T13:26:18.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.enforce_single_del_contracts: true 2026-03-10T13:26:18.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.max_background_jobs: 2 2026-03-10T13:26:18.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.max_background_compactions: -1 2026-03-10T13:26:18.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.max_subcompactions: 1 2026-03-10T13:26:18.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.avoid_flush_during_shutdown: 0 2026-03-10T13:26:18.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.writable_file_max_buffer_size: 1048576 2026-03-10T13:26:18.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.delayed_write_rate : 16777216 2026-03-10T13:26:18.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.max_total_wal_size: 0 2026-03-10T13:26:18.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.delete_obsolete_files_period_micros: 21600000000 2026-03-10T13:26:18.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.stats_dump_period_sec: 600 2026-03-10T13:26:18.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.stats_persist_period_sec: 600 2026-03-10T13:26:18.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.stats_history_buffer_size: 1048576 2026-03-10T13:26:18.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.max_open_files: -1 2026-03-10T13:26:18.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.bytes_per_sync: 0 2026-03-10T13:26:18.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.wal_bytes_per_sync: 0 2026-03-10T13:26:18.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.strict_bytes_per_sync: 0 2026-03-10T13:26:18.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.compaction_readahead_size: 0 2026-03-10T13:26:18.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.max_background_flushes: -1 2026-03-10T13:26:18.022 
INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Compression algorithms supported: 2026-03-10T13:26:18.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: kZSTD supported: 0 2026-03-10T13:26:18.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: kXpressCompression supported: 0 2026-03-10T13:26:18.023 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: kBZip2Compression supported: 0 2026-03-10T13:26:18.023 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: kZSTDNotFinalCompression supported: 0 2026-03-10T13:26:18.023 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: kLZ4Compression supported: 1 2026-03-10T13:26:18.023 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: kZlibCompression supported: 1 2026-03-10T13:26:18.023 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: kLZ4HCCompression supported: 1 2026-03-10T13:26:18.023 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: kSnappyCompression supported: 1 2026-03-10T13:26:18.023 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Fast CRC32 supported: Supported on x86 2026-03-10T13:26:18.023 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: DMutex implementation: pthread_mutex_t 2026-03-10T13:26:18.023 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: [db/version_set.cc:5527] Recovering from manifest file: /var/lib/ceph/mon/ceph-b/store.db/MANIFEST-000009 2026-03-10T13:26:18.023 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [default]: 2026-03-10T13:26:18.023 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.comparator: leveldb.BytewiseComparator 2026-03-10T13:26:18.023 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.merge_operator: 2026-03-10T13:26:18.023 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.compaction_filter: None 2026-03-10T13:26:18.023 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.compaction_filter_factory: None 2026-03-10T13:26:18.023 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.sst_partitioner_factory: None 2026-03-10T13:26:18.023 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.memtable_factory: SkipListFactory 2026-03-10T13:26:18.023 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.table_factory: BlockBasedTable 2026-03-10T13:26:18.023 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: table_factory options: flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x55632814c5a0) 2026-03-10T13:26:18.023 INFO:journalctl@ceph.mon.b.vm08.stdout: cache_index_and_filter_blocks: 1 2026-03-10T13:26:18.023 INFO:journalctl@ceph.mon.b.vm08.stdout: cache_index_and_filter_blocks_with_high_priority: 0 2026-03-10T13:26:18.023 INFO:journalctl@ceph.mon.b.vm08.stdout: pin_l0_filter_and_index_blocks_in_cache: 0 2026-03-10T13:26:18.023 
INFO:journalctl@ceph.mon.b.vm08.stdout: pin_top_level_index_and_filter: 1 2026-03-10T13:26:18.023 INFO:journalctl@ceph.mon.b.vm08.stdout: index_type: 0 2026-03-10T13:26:18.023 INFO:journalctl@ceph.mon.b.vm08.stdout: data_block_index_type: 0 2026-03-10T13:26:18.023 INFO:journalctl@ceph.mon.b.vm08.stdout: index_shortening: 1 2026-03-10T13:26:18.023 INFO:journalctl@ceph.mon.b.vm08.stdout: data_block_hash_table_util_ratio: 0.750000 2026-03-10T13:26:18.023 INFO:journalctl@ceph.mon.b.vm08.stdout: checksum: 4 2026-03-10T13:26:18.023 INFO:journalctl@ceph.mon.b.vm08.stdout: no_block_cache: 0 2026-03-10T13:26:18.023 INFO:journalctl@ceph.mon.b.vm08.stdout: block_cache: 0x556328171350 2026-03-10T13:26:18.023 INFO:journalctl@ceph.mon.b.vm08.stdout: block_cache_name: BinnedLRUCache 2026-03-10T13:26:18.023 INFO:journalctl@ceph.mon.b.vm08.stdout: block_cache_options: 2026-03-10T13:26:18.023 INFO:journalctl@ceph.mon.b.vm08.stdout: capacity : 536870912 2026-03-10T13:26:18.023 INFO:journalctl@ceph.mon.b.vm08.stdout: num_shard_bits : 4 2026-03-10T13:26:18.023 INFO:journalctl@ceph.mon.b.vm08.stdout: strict_capacity_limit : 0 2026-03-10T13:26:18.023 INFO:journalctl@ceph.mon.b.vm08.stdout: high_pri_pool_ratio: 0.000 2026-03-10T13:26:18.023 INFO:journalctl@ceph.mon.b.vm08.stdout: block_cache_compressed: (nil) 2026-03-10T13:26:18.023 INFO:journalctl@ceph.mon.b.vm08.stdout: persistent_cache: (nil) 2026-03-10T13:26:18.023 INFO:journalctl@ceph.mon.b.vm08.stdout: block_size: 4096 2026-03-10T13:26:18.023 INFO:journalctl@ceph.mon.b.vm08.stdout: block_size_deviation: 10 2026-03-10T13:26:18.023 INFO:journalctl@ceph.mon.b.vm08.stdout: block_restart_interval: 16 2026-03-10T13:26:18.023 INFO:journalctl@ceph.mon.b.vm08.stdout: index_block_restart_interval: 1 2026-03-10T13:26:18.023 INFO:journalctl@ceph.mon.b.vm08.stdout: metadata_block_size: 4096 2026-03-10T13:26:18.023 INFO:journalctl@ceph.mon.b.vm08.stdout: partition_filters: 0 2026-03-10T13:26:18.023 INFO:journalctl@ceph.mon.b.vm08.stdout: use_delta_encoding: 1 2026-03-10T13:26:18.023 INFO:journalctl@ceph.mon.b.vm08.stdout: filter_policy: bloomfilter 2026-03-10T13:26:18.023 INFO:journalctl@ceph.mon.b.vm08.stdout: whole_key_filtering: 1 2026-03-10T13:26:18.023 INFO:journalctl@ceph.mon.b.vm08.stdout: verify_compression: 0 2026-03-10T13:26:18.023 INFO:journalctl@ceph.mon.b.vm08.stdout: read_amp_bytes_per_bit: 0 2026-03-10T13:26:18.023 INFO:journalctl@ceph.mon.b.vm08.stdout: format_version: 5 2026-03-10T13:26:18.023 INFO:journalctl@ceph.mon.b.vm08.stdout: enable_index_compression: 1 2026-03-10T13:26:18.023 INFO:journalctl@ceph.mon.b.vm08.stdout: block_align: 0 2026-03-10T13:26:18.023 INFO:journalctl@ceph.mon.b.vm08.stdout: max_auto_readahead_size: 262144 2026-03-10T13:26:18.023 INFO:journalctl@ceph.mon.b.vm08.stdout: prepopulate_block_cache: 0 2026-03-10T13:26:18.023 INFO:journalctl@ceph.mon.b.vm08.stdout: initial_auto_readahead_size: 8192 2026-03-10T13:26:18.023 INFO:journalctl@ceph.mon.b.vm08.stdout: num_file_reads_for_auto_readahead: 2 2026-03-10T13:26:18.023 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.write_buffer_size: 33554432 2026-03-10T13:26:18.023 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.max_write_buffer_number: 2 2026-03-10T13:26:18.023 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.compression: NoCompression 2026-03-10T13:26:18.023 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: 
Options.bottommost_compression: Disabled 2026-03-10T13:26:18.023 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.prefix_extractor: nullptr 2026-03-10T13:26:18.023 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.memtable_insert_with_hint_prefix_extractor: nullptr 2026-03-10T13:26:18.023 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.num_levels: 7 2026-03-10T13:26:18.024 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.min_write_buffer_number_to_merge: 1 2026-03-10T13:26:18.024 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.max_write_buffer_number_to_maintain: 0 2026-03-10T13:26:18.024 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.max_write_buffer_size_to_maintain: 0 2026-03-10T13:26:18.024 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.bottommost_compression_opts.window_bits: -14 2026-03-10T13:26:18.024 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.bottommost_compression_opts.level: 32767 2026-03-10T13:26:18.024 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.bottommost_compression_opts.strategy: 0 2026-03-10T13:26:18.024 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.bottommost_compression_opts.max_dict_bytes: 0 2026-03-10T13:26:18.024 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.bottommost_compression_opts.zstd_max_train_bytes: 0 2026-03-10T13:26:18.024 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.bottommost_compression_opts.parallel_threads: 1 2026-03-10T13:26:18.024 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.bottommost_compression_opts.enabled: false 2026-03-10T13:26:18.024 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.bottommost_compression_opts.max_dict_buffer_bytes: 0 2026-03-10T13:26:18.024 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.bottommost_compression_opts.use_zstd_dict_trainer: true 2026-03-10T13:26:18.024 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.compression_opts.window_bits: -14 2026-03-10T13:26:18.024 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.compression_opts.level: 32767 2026-03-10T13:26:18.024 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.compression_opts.strategy: 0 2026-03-10T13:26:18.024 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.compression_opts.max_dict_bytes: 0 2026-03-10T13:26:18.024 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.compression_opts.zstd_max_train_bytes: 0 2026-03-10T13:26:18.024 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.compression_opts.use_zstd_dict_trainer: true 2026-03-10T13:26:18.024 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.compression_opts.parallel_threads: 1 2026-03-10T13:26:18.024 
INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.compression_opts.enabled: false 2026-03-10T13:26:18.024 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.compression_opts.max_dict_buffer_bytes: 0 2026-03-10T13:26:18.024 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.level0_file_num_compaction_trigger: 4 2026-03-10T13:26:18.024 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.level0_slowdown_writes_trigger: 20 2026-03-10T13:26:18.024 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.level0_stop_writes_trigger: 36 2026-03-10T13:26:18.024 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.target_file_size_base: 67108864 2026-03-10T13:26:18.024 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.target_file_size_multiplier: 1 2026-03-10T13:26:18.024 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.max_bytes_for_level_base: 268435456 2026-03-10T13:26:18.024 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.level_compaction_dynamic_level_bytes: 1 2026-03-10T13:26:18.024 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.max_bytes_for_level_multiplier: 10.000000 2026-03-10T13:26:18.024 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1 2026-03-10T13:26:18.024 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1 2026-03-10T13:26:18.024 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1 2026-03-10T13:26:18.024 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1 2026-03-10T13:26:18.024 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1 2026-03-10T13:26:18.024 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1 2026-03-10T13:26:18.024 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1 2026-03-10T13:26:18.024 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.max_sequential_skip_in_iterations: 8 2026-03-10T13:26:18.024 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.max_compaction_bytes: 1677721600 2026-03-10T13:26:18.024 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.ignore_max_compaction_bytes_for_input: true 2026-03-10T13:26:18.024 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.arena_block_size: 1048576 2026-03-10T13:26:18.024 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.soft_pending_compaction_bytes_limit: 68719476736 2026-03-10T13:26:18.024 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: 
Options.hard_pending_compaction_bytes_limit: 274877906944 2026-03-10T13:26:18.024 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.disable_auto_compactions: 0 2026-03-10T13:26:18.024 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.compaction_style: kCompactionStyleLevel 2026-03-10T13:26:18.024 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.compaction_pri: kMinOverlappingRatio 2026-03-10T13:26:18.024 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.compaction_options_universal.size_ratio: 1 2026-03-10T13:26:18.024 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.compaction_options_universal.min_merge_width: 2 2026-03-10T13:26:18.024 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295 2026-03-10T13:26:18.024 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200 2026-03-10T13:26:18.024 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1 2026-03-10T13:26:18.024 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize 2026-03-10T13:26:18.024 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824 2026-03-10T13:26:18.024 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0 2026-03-10T13:26:18.024 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0); 2026-03-10T13:26:18.024 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.inplace_update_support: 0 2026-03-10T13:26:18.024 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.inplace_update_num_locks: 10000 2026-03-10T13:26:18.024 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.memtable_prefix_bloom_size_ratio: 0.000000 2026-03-10T13:26:18.024 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.memtable_whole_key_filtering: 0 2026-03-10T13:26:18.024 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.memtable_huge_page_size: 0 2026-03-10T13:26:18.024 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.bloom_locality: 0 2026-03-10T13:26:18.024 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.max_successive_merges: 0 2026-03-10T13:26:18.024 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.optimize_filters_for_hits: 0 2026-03-10T13:26:18.024 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.paranoid_file_checks: 0 2026-03-10T13:26:18.025 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 
ceph-mon[82639]: rocksdb: Options.force_consistency_checks: 1 2026-03-10T13:26:18.025 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.report_bg_io_stats: 0 2026-03-10T13:26:18.025 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.ttl: 2592000 2026-03-10T13:26:18.025 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.periodic_compaction_seconds: 0 2026-03-10T13:26:18.025 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.preclude_last_level_data_seconds: 0 2026-03-10T13:26:18.025 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.preserve_internal_time_seconds: 0 2026-03-10T13:26:18.025 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.enable_blob_files: false 2026-03-10T13:26:18.025 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.min_blob_size: 0 2026-03-10T13:26:18.025 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.blob_file_size: 268435456 2026-03-10T13:26:18.025 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.blob_compression_type: NoCompression 2026-03-10T13:26:18.025 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.enable_blob_garbage_collection: false 2026-03-10T13:26:18.025 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.blob_garbage_collection_age_cutoff: 0.250000 2026-03-10T13:26:18.025 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000 2026-03-10T13:26:18.025 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.blob_compaction_readahead_size: 0 2026-03-10T13:26:18.025 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.blob_file_starting_level: 0 2026-03-10T13:26:18.025 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: Options.experimental_mempurge_threshold: 0.000000 2026-03-10T13:26:18.025 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: [table/block_based/block_based_table_reader.cc:721] At least one SST file opened without unique ID to verify: 42.sst 2026-03-10T13:26:18.025 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed. 
2026-03-10T13:26:18.025 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: [db/version_set.cc:5566] Recovered from manifest file:/var/lib/ceph/mon/ceph-b/store.db/MANIFEST-000009 succeeded,manifest_file_number is 9, next_file_number is 44, last_sequence is 23710, log_number is 40,prev_log_number is 0,max_column_family is 0,min_log_number_to_keep is 0 2026-03-10T13:26:18.025 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: [db/version_set.cc:5581] Column family [default] (ID 0), log number is 40 2026-03-10T13:26:18.025 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: [db/db_impl/db_impl_open.cc:539] DB ID: 46353dfe-0d12-4656-9f81-63b2defe6f15 2026-03-10T13:26:18.025 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773149177868214, "job": 1, "event": "recovery_started", "wal_files": [40]} 2026-03-10T13:26:18.025 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: [db/db_impl/db_impl_open.cc:1043] Recovering log #40 mode 2 2026-03-10T13:26:18.025 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773149177873399, "cf_name": "default", "job": 1, "event": "table_file_creation", "file_number": 45, "file_size": 35987, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 23715, "largest_seqno": 23740, "table_properties": {"data_size": 34773, "index_size": 120, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 69, "raw_key_size": 634, "raw_average_key_size": 25, "raw_value_size": 34231, "raw_average_value_size": 1369, "num_data_blocks": 5, "num_entries": 25, "num_filter_entries": 25, "num_deletions": 0, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1773149177, "oldest_key_time": 0, "file_creation_time": 0, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "46353dfe-0d12-4656-9f81-63b2defe6f15", "db_session_id": "CJCG8FCDTSWS5IJAOKPN", "orig_file_number": 45, "seqno_to_time_mapping": "N/A"}} 2026-03-10T13:26:18.025 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773149177873471, "job": 1, "event": "recovery_finished"} 2026-03-10T13:26:18.025 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: [db/version_set.cc:5047] Creating manifest 47 2026-03-10T13:26:18.025 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed. 
2026-03-10T13:26:18.025 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-b/store.db/000040.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000 2026-03-10T13:26:18.025 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: [db/db_impl/db_impl_open.cc:1987] SstFileManager instance 0x556328172e00 2026-03-10T13:26:18.025 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: DB pointer 0x556328288000 2026-03-10T13:26:18.025 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: starting mon.b rank 2 at public addrs [v2:192.168.123.108:3300/0,v1:192.168.123.108:6789/0] at bind addrs [v2:192.168.123.108:3300/0,v1:192.168.123.108:6789/0] mon_data /var/lib/ceph/mon/ceph-b fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 2026-03-10T13:26:18.025 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: mon.b@-1(???) e3 preinit fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 2026-03-10T13:26:18.025 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: mon.b@-1(???).mds e1 new map 2026-03-10T13:26:18.025 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: mon.b@-1(???).mds e1 print_map 2026-03-10T13:26:18.025 INFO:journalctl@ceph.mon.b.vm08.stdout: e1 2026-03-10T13:26:18.025 INFO:journalctl@ceph.mon.b.vm08.stdout: btime 1970-01-01T00:00:00:000000+0000 2026-03-10T13:26:18.025 INFO:journalctl@ceph.mon.b.vm08.stdout: enable_multiple, ever_enabled_multiple: 1,1 2026-03-10T13:26:18.025 INFO:journalctl@ceph.mon.b.vm08.stdout: default compat: compat={},rocompat={},incompat={1=base v0.20,2=client writeable ranges,3=default file layouts on dirs,4=dir inode in separate object,5=mds uses versioned encoding,6=dirfrag is stored in omap,8=no anchor table,9=file layout v2,10=snaprealm v2} 2026-03-10T13:26:18.025 INFO:journalctl@ceph.mon.b.vm08.stdout: legacy client fscid: -1 2026-03-10T13:26:18.025 INFO:journalctl@ceph.mon.b.vm08.stdout: 2026-03-10T13:26:18.025 INFO:journalctl@ceph.mon.b.vm08.stdout: No filesystems configured 2026-03-10T13:26:18.025 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: mon.b@-1(???).osd e94 crush map has features 3314933000854323200, adjusting msgr requires 2026-03-10T13:26:18.025 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: mon.b@-1(???).osd e94 crush map has features 432629239337189376, adjusting msgr requires 2026-03-10T13:26:18.025 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: mon.b@-1(???).osd e94 crush map has features 432629239337189376, adjusting msgr requires 2026-03-10T13:26:18.025 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: mon.b@-1(???).osd e94 crush map has features 432629239337189376, adjusting msgr requires 2026-03-10T13:26:18.025 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS ------- 2026-03-10T13:26:18.025 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: rocksdb: [db/db_impl/db_impl.cc:1111] 2026-03-10T13:26:18.025 INFO:journalctl@ceph.mon.b.vm08.stdout: ** DB Stats ** 2026-03-10T13:26:18.025 INFO:journalctl@ceph.mon.b.vm08.stdout: Uptime(secs): 0.0 total, 0.0 interval 2026-03-10T13:26:18.025 INFO:journalctl@ceph.mon.b.vm08.stdout: Cumulative writes: 0 writes, 0 
keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 GB, 0.00 MB/s 2026-03-10T13:26:18.025 INFO:journalctl@ceph.mon.b.vm08.stdout: Cumulative WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s 2026-03-10T13:26:18.025 INFO:journalctl@ceph.mon.b.vm08.stdout: Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent 2026-03-10T13:26:18.025 INFO:journalctl@ceph.mon.b.vm08.stdout: Interval writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 MB, 0.00 MB/s 2026-03-10T13:26:18.025 INFO:journalctl@ceph.mon.b.vm08.stdout: Interval WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s 2026-03-10T13:26:18.025 INFO:journalctl@ceph.mon.b.vm08.stdout: Interval stall: 00:00:0.000 H:M:S, 0.0 percent 2026-03-10T13:26:18.025 INFO:journalctl@ceph.mon.b.vm08.stdout: 2026-03-10T13:26:18.025 INFO:journalctl@ceph.mon.b.vm08.stdout: ** Compaction Stats [default] ** 2026-03-10T13:26:18.025 INFO:journalctl@ceph.mon.b.vm08.stdout: Level Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB) 2026-03-10T13:26:18.025 INFO:journalctl@ceph.mon.b.vm08.stdout: ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ 2026-03-10T13:26:18.025 INFO:journalctl@ceph.mon.b.vm08.stdout: L0 1/0 35.14 KB 0.2 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 6.8 0.01 0.00 1 0.005 0 0 0.0 0.0 2026-03-10T13:26:18.025 INFO:journalctl@ceph.mon.b.vm08.stdout: L6 1/0 9.48 MB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.00 0.00 0 0.000 0 0 0.0 0.0 2026-03-10T13:26:18.026 INFO:journalctl@ceph.mon.b.vm08.stdout: Sum 2/0 9.51 MB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 6.8 0.01 0.00 1 0.005 0 0 0.0 0.0 2026-03-10T13:26:18.026 INFO:journalctl@ceph.mon.b.vm08.stdout: Int 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 6.8 0.01 0.00 1 0.005 0 0 0.0 0.0 2026-03-10T13:26:18.026 INFO:journalctl@ceph.mon.b.vm08.stdout: 2026-03-10T13:26:18.026 INFO:journalctl@ceph.mon.b.vm08.stdout: ** Compaction Stats [default] ** 2026-03-10T13:26:18.026 INFO:journalctl@ceph.mon.b.vm08.stdout: Priority Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB) 2026-03-10T13:26:18.026 INFO:journalctl@ceph.mon.b.vm08.stdout: --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 2026-03-10T13:26:18.026 INFO:journalctl@ceph.mon.b.vm08.stdout: User 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 6.8 0.01 0.00 1 0.005 0 0 0.0 0.0 2026-03-10T13:26:18.026 INFO:journalctl@ceph.mon.b.vm08.stdout: 2026-03-10T13:26:18.026 INFO:journalctl@ceph.mon.b.vm08.stdout: Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0 2026-03-10T13:26:18.026 INFO:journalctl@ceph.mon.b.vm08.stdout: 2026-03-10T13:26:18.026 INFO:journalctl@ceph.mon.b.vm08.stdout: Uptime(secs): 0.0 total, 0.0 interval 2026-03-10T13:26:18.026 INFO:journalctl@ceph.mon.b.vm08.stdout: Flush(GB): cumulative 0.000, interval 0.000 2026-03-10T13:26:18.026 INFO:journalctl@ceph.mon.b.vm08.stdout: AddFile(GB): cumulative 0.000, interval 0.000 2026-03-10T13:26:18.026 INFO:journalctl@ceph.mon.b.vm08.stdout: AddFile(Total Files): cumulative 0, interval 0 
2026-03-10T13:26:18.026 INFO:journalctl@ceph.mon.b.vm08.stdout: AddFile(L0 Files): cumulative 0, interval 0 2026-03-10T13:26:18.026 INFO:journalctl@ceph.mon.b.vm08.stdout: AddFile(Keys): cumulative 0, interval 0 2026-03-10T13:26:18.026 INFO:journalctl@ceph.mon.b.vm08.stdout: Cumulative compaction: 0.00 GB write, 3.16 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds 2026-03-10T13:26:18.026 INFO:journalctl@ceph.mon.b.vm08.stdout: Interval compaction: 0.00 GB write, 3.16 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds 2026-03-10T13:26:18.026 INFO:journalctl@ceph.mon.b.vm08.stdout: Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count 2026-03-10T13:26:18.026 INFO:journalctl@ceph.mon.b.vm08.stdout: Block cache BinnedLRUCache@0x556328171350#2 capacity: 512.00 MB usage: 143.11 KB table_size: 0 occupancy: 18446744073709551615 collections: 1 last_copies: 0 last_secs: 8e-06 secs_since: 0 2026-03-10T13:26:18.026 INFO:journalctl@ceph.mon.b.vm08.stdout: Block cache entry stats(count,size,portion): DataBlock(18,110.69 KB,0.021112%) FilterBlock(2,12.14 KB,0.00231564%) IndexBlock(2,20.28 KB,0.00386834%) Misc(1,0.00 KB,0%) 2026-03-10T13:26:18.026 INFO:journalctl@ceph.mon.b.vm08.stdout: 2026-03-10T13:26:18.026 INFO:journalctl@ceph.mon.b.vm08.stdout: ** File Read Latency Histogram By Level [default] ** 2026-03-10T13:26:18.026 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:17 vm08 ceph-mon[82639]: mon.b@-1(???).paxosservice(auth 1..22) refresh upgraded, format 0 -> 3 2026-03-10T13:26:19.145 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:19 vm08 ceph-mon[82639]: from='client.15318 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:26:19.145 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:19 vm08 ceph-mon[82639]: mon.b calling monitor election 2026-03-10T13:26:19.145 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:19 vm08 ceph-mon[82639]: mon.a calling monitor election 2026-03-10T13:26:19.145 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:19 vm08 ceph-mon[82639]: mon.a is new leader, mons a,c,b in quorum (ranks 0,1,2) 2026-03-10T13:26:19.145 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:19 vm08 ceph-mon[82639]: monmap e3: 3 mons at {a=[v2:192.168.123.100:3300/0,v1:192.168.123.100:6789/0],b=[v2:192.168.123.108:3300/0,v1:192.168.123.108:6789/0],c=[v2:192.168.123.100:3301/0,v1:192.168.123.100:6790/0]} 2026-03-10T13:26:19.145 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:19 vm08 ceph-mon[82639]: fsmap 2026-03-10T13:26:19.145 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:19 vm08 ceph-mon[82639]: osdmap e94: 8 total, 8 up, 8 in 2026-03-10T13:26:19.145 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:19 vm08 ceph-mon[82639]: mgrmap e39: y(active, since 38s), standbys: x 2026-03-10T13:26:19.145 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:19 vm08 ceph-mon[82639]: overall HEALTH_OK 2026-03-10T13:26:19.145 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:19 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:19.145 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:19 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:19.145 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:19 vm08 
ceph-mon[82639]: pgmap v22: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:26:19.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:19 vm00 ceph-mon[47364]: from='client.15318 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:26:19.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:19 vm00 ceph-mon[47364]: mon.b calling monitor election 2026-03-10T13:26:19.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:19 vm00 ceph-mon[47364]: mon.a calling monitor election 2026-03-10T13:26:19.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:19 vm00 ceph-mon[47364]: mon.a is new leader, mons a,c,b in quorum (ranks 0,1,2) 2026-03-10T13:26:19.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:19 vm00 ceph-mon[47364]: monmap e3: 3 mons at {a=[v2:192.168.123.100:3300/0,v1:192.168.123.100:6789/0],b=[v2:192.168.123.108:3300/0,v1:192.168.123.108:6789/0],c=[v2:192.168.123.100:3301/0,v1:192.168.123.100:6790/0]} 2026-03-10T13:26:19.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:19 vm00 ceph-mon[47364]: fsmap 2026-03-10T13:26:19.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:19 vm00 ceph-mon[47364]: osdmap e94: 8 total, 8 up, 8 in 2026-03-10T13:26:19.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:19 vm00 ceph-mon[47364]: mgrmap e39: y(active, since 38s), standbys: x 2026-03-10T13:26:19.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:19 vm00 ceph-mon[47364]: overall HEALTH_OK 2026-03-10T13:26:19.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:19 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:19.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:19 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:19.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:19 vm00 ceph-mon[47364]: pgmap v22: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:26:19.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:19 vm00 ceph-mon[51670]: from='client.15318 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:26:19.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:19 vm00 ceph-mon[51670]: mon.b calling monitor election 2026-03-10T13:26:19.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:19 vm00 ceph-mon[51670]: mon.a calling monitor election 2026-03-10T13:26:19.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:19 vm00 ceph-mon[51670]: mon.a is new leader, mons a,c,b in quorum (ranks 0,1,2) 2026-03-10T13:26:19.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:19 vm00 ceph-mon[51670]: monmap e3: 3 mons at {a=[v2:192.168.123.100:3300/0,v1:192.168.123.100:6789/0],b=[v2:192.168.123.108:3300/0,v1:192.168.123.108:6789/0],c=[v2:192.168.123.100:3301/0,v1:192.168.123.100:6790/0]} 2026-03-10T13:26:19.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:19 vm00 ceph-mon[51670]: fsmap 2026-03-10T13:26:19.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:19 vm00 ceph-mon[51670]: osdmap e94: 8 total, 8 up, 8 in 2026-03-10T13:26:19.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:19 vm00 ceph-mon[51670]: mgrmap e39: y(active, since 38s), standbys: x 2026-03-10T13:26:19.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:19 vm00 ceph-mon[51670]: overall HEALTH_OK 
2026-03-10T13:26:19.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:19 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:19.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:19 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:19.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:19 vm00 ceph-mon[51670]: pgmap v22: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:26:20.668 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:20 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:20.668 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:20 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:20.668 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:20 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:20.668 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:20 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:20.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:20 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:20.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:20 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:20.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:20 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:20.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:20 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:20.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:20 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:20.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:20 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:20.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:20 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:20.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:20 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:22.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: pgmap v23: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:26:22.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: Detected new or changed devices on vm08 2026-03-10T13:26:22.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:22.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:22.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:26:22.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "auth get", 
"entity": "client.admin"}]: dispatch 2026-03-10T13:26:22.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:22.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:26:22.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:22.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:22.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: Upgrade: Setting container_image for all crash 2026-03-10T13:26:22.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:22.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:22.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: Upgrade: Setting container_image for all mds 2026-03-10T13:26:22.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:22.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:22.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: Upgrade: Setting container_image for all rbd-mirror 2026-03-10T13:26:22.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:22.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:22.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: Upgrade: Setting container_image for all cephfs-mirror 2026-03-10T13:26:22.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:22.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]: dispatch 2026-03-10T13:26:22.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]': finished 2026-03-10T13:26:22.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:22.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: Upgrade: Setting container_image for all ceph-exporter 2026-03-10T13:26:22.003 
INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:22.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:22.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: Upgrade: Setting container_image for all nfs 2026-03-10T13:26:22.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:22.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:22.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: Upgrade: Setting container_image for all nvmeof 2026-03-10T13:26:22.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:22.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:22.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: Upgrade: Setting container_image for all node-exporter 2026-03-10T13:26:22.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]: dispatch 2026-03-10T13:26:22.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:22.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: Upgrade: Setting container_image for all prometheus 2026-03-10T13:26:22.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]: dispatch 2026-03-10T13:26:22.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:22.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: Upgrade: Setting container_image for all alertmanager 2026-03-10T13:26:22.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]: dispatch 2026-03-10T13:26:22.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:22.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: Upgrade: Setting container_image for all grafana 2026-03-10T13:26:22.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]: dispatch 
2026-03-10T13:26:22.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:22.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: Upgrade: Setting container_image for all loki 2026-03-10T13:26:22.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]: dispatch 2026-03-10T13:26:22.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:22.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: Upgrade: Setting container_image for all promtail 2026-03-10T13:26:22.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]: dispatch 2026-03-10T13:26:22.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: Upgrade: Finalizing container_image settings 2026-03-10T13:26:22.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]: dispatch 2026-03-10T13:26:22.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]': finished 2026-03-10T13:26:22.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:26:22.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon"}]': finished 2026-03-10T13:26:22.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]: dispatch 2026-03-10T13:26:22.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:26:22.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]': finished 2026-03-10T13:26:22.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd"}]: dispatch 2026-03-10T13:26:22.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mds"}]: dispatch 2026-03-10T13:26:22.004 
INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mds"}]': finished 2026-03-10T13:26:22.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]: dispatch 2026-03-10T13:26:22.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]: dispatch 2026-03-10T13:26:22.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]': finished 2026-03-10T13:26:22.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:26:22.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]: dispatch 2026-03-10T13:26:22.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]': finished 2026-03-10T13:26:22.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.iscsi"}]: dispatch 2026-03-10T13:26:22.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]: dispatch 2026-03-10T13:26:22.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]': finished 2026-03-10T13:26:22.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]: dispatch 2026-03-10T13:26:22.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]': finished 2026-03-10T13:26:22.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:26:22.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:26:22.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 
10 13:26:21 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:26:22.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:26:22.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:26:22.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:26:22.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: Upgrade: Complete! 2026-03-10T13:26:22.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]: dispatch 2026-03-10T13:26:22.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd='[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]': finished 2026-03-10T13:26:22.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:26:22.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:26:22.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:22.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:26:22.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:26:22.004 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:22.004 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: pgmap v23: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:26:22.004 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: Detected new or changed devices on vm08 2026-03-10T13:26:22.004 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:22.004 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:22.004 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 
cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:26:22.004 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:26:22.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:22.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:26:22.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:22.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:22.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: Upgrade: Setting container_image for all crash 2026-03-10T13:26:22.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:22.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:22.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: Upgrade: Setting container_image for all mds 2026-03-10T13:26:22.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:22.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:22.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: Upgrade: Setting container_image for all rbd-mirror 2026-03-10T13:26:22.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:22.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:22.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: Upgrade: Setting container_image for all cephfs-mirror 2026-03-10T13:26:22.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:22.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]: dispatch 2026-03-10T13:26:22.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]': finished 2026-03-10T13:26:22.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' 
entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:22.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: Upgrade: Setting container_image for all ceph-exporter 2026-03-10T13:26:22.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:22.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:22.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: Upgrade: Setting container_image for all nfs 2026-03-10T13:26:22.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:22.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:22.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: Upgrade: Setting container_image for all nvmeof 2026-03-10T13:26:22.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:22.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:22.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: Upgrade: Setting container_image for all node-exporter 2026-03-10T13:26:22.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]: dispatch 2026-03-10T13:26:22.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:22.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: Upgrade: Setting container_image for all prometheus 2026-03-10T13:26:22.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]: dispatch 2026-03-10T13:26:22.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:22.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: Upgrade: Setting container_image for all alertmanager 2026-03-10T13:26:22.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]: dispatch 2026-03-10T13:26:22.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:22.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: Upgrade: Setting container_image for all grafana 
2026-03-10T13:26:22.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]: dispatch 2026-03-10T13:26:22.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:22.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: Upgrade: Setting container_image for all loki 2026-03-10T13:26:22.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]: dispatch 2026-03-10T13:26:22.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:22.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: Upgrade: Setting container_image for all promtail 2026-03-10T13:26:22.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]: dispatch 2026-03-10T13:26:22.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: Upgrade: Finalizing container_image settings 2026-03-10T13:26:22.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]: dispatch 2026-03-10T13:26:22.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]': finished 2026-03-10T13:26:22.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:26:22.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon"}]': finished 2026-03-10T13:26:22.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]: dispatch 2026-03-10T13:26:22.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:26:22.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]': finished 2026-03-10T13:26:22.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd"}]: dispatch 2026-03-10T13:26:22.005 
INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mds"}]: dispatch 2026-03-10T13:26:22.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mds"}]': finished 2026-03-10T13:26:22.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]: dispatch 2026-03-10T13:26:22.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]: dispatch 2026-03-10T13:26:22.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]': finished 2026-03-10T13:26:22.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:26:22.005 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]: dispatch 2026-03-10T13:26:22.006 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]': finished 2026-03-10T13:26:22.006 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.iscsi"}]: dispatch 2026-03-10T13:26:22.006 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]: dispatch 2026-03-10T13:26:22.006 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]': finished 2026-03-10T13:26:22.006 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]: dispatch 2026-03-10T13:26:22.006 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]': finished 2026-03-10T13:26:22.006 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:26:22.006 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 
10 13:26:21 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:26:22.006 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:26:22.006 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:26:22.006 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:26:22.006 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:26:22.006 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: Upgrade: Complete! 2026-03-10T13:26:22.006 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]: dispatch 2026-03-10T13:26:22.006 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd='[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]': finished 2026-03-10T13:26:22.006 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:26:22.006 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:26:22.006 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:22.006 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:26:22.006 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:26:22.006 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:21 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:22.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: pgmap v23: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:26:22.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: Detected new or changed devices on vm08 2026-03-10T13:26:22.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:22.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 
ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:22.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:26:22.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:26:22.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:22.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:26:22.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:22.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:22.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: Upgrade: Setting container_image for all crash 2026-03-10T13:26:22.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:22.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:22.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: Upgrade: Setting container_image for all mds 2026-03-10T13:26:22.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:22.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:22.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: Upgrade: Setting container_image for all rbd-mirror 2026-03-10T13:26:22.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:22.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:22.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: Upgrade: Setting container_image for all cephfs-mirror 2026-03-10T13:26:22.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:22.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]: dispatch 2026-03-10T13:26:22.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' 
entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]': finished 2026-03-10T13:26:22.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:22.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: Upgrade: Setting container_image for all ceph-exporter 2026-03-10T13:26:22.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:22.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:22.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: Upgrade: Setting container_image for all nfs 2026-03-10T13:26:22.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:22.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:22.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: Upgrade: Setting container_image for all nvmeof 2026-03-10T13:26:22.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:22.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:22.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: Upgrade: Setting container_image for all node-exporter 2026-03-10T13:26:22.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]: dispatch 2026-03-10T13:26:22.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:22.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: Upgrade: Setting container_image for all prometheus 2026-03-10T13:26:22.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]: dispatch 2026-03-10T13:26:22.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:22.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: Upgrade: Setting container_image for all alertmanager 2026-03-10T13:26:22.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]: dispatch 2026-03-10T13:26:22.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: from='mgr.24955 
192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:22.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: Upgrade: Setting container_image for all grafana 2026-03-10T13:26:22.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]: dispatch 2026-03-10T13:26:22.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:22.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: Upgrade: Setting container_image for all loki 2026-03-10T13:26:22.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]: dispatch 2026-03-10T13:26:22.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:22.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: Upgrade: Setting container_image for all promtail 2026-03-10T13:26:22.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]: dispatch 2026-03-10T13:26:22.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: Upgrade: Finalizing container_image settings 2026-03-10T13:26:22.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]: dispatch 2026-03-10T13:26:22.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]': finished 2026-03-10T13:26:22.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:26:22.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon"}]': finished 2026-03-10T13:26:22.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]: dispatch 2026-03-10T13:26:22.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:26:22.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]': finished 2026-03-10T13:26:22.022 
INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd"}]: dispatch 2026-03-10T13:26:22.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mds"}]: dispatch 2026-03-10T13:26:22.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mds"}]': finished 2026-03-10T13:26:22.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]: dispatch 2026-03-10T13:26:22.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]: dispatch 2026-03-10T13:26:22.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]': finished 2026-03-10T13:26:22.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:26:22.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]: dispatch 2026-03-10T13:26:22.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]': finished 2026-03-10T13:26:22.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.iscsi"}]: dispatch 2026-03-10T13:26:22.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]: dispatch 2026-03-10T13:26:22.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]': finished 2026-03-10T13:26:22.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]: dispatch 2026-03-10T13:26:22.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]': finished 2026-03-10T13:26:22.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 
10 13:26:21 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:26:22.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:26:22.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:26:22.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:26:22.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:26:22.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:26:22.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: Upgrade: Complete! 2026-03-10T13:26:22.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]: dispatch 2026-03-10T13:26:22.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd='[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]': finished 2026-03-10T13:26:22.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:26:22.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:26:22.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:22.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:26:22.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:26:22.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:21 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:23.252 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:26:22 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:26:22.882+0000 7ff18fe3c640 -1 mgr.server handle_report got status from non-daemon mon.b 2026-03-10T13:26:24.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:23 vm00 ceph-mon[47364]: pgmap v24: 
161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:26:24.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:23 vm00 ceph-mon[51670]: pgmap v24: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:26:24.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:23 vm08 ceph-mon[82639]: pgmap v24: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:26:26.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:25 vm00 ceph-mon[47364]: pgmap v25: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:26:26.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:25 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:26:26.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:25 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:26.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:25 vm00 ceph-mon[51670]: pgmap v25: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:26:26.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:25 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:26:26.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:25 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:26.003 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:26:25 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:26:25] "GET /metrics HTTP/1.1" 200 37555 "" "Prometheus/2.51.0" 2026-03-10T13:26:26.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:25 vm08 ceph-mon[82639]: pgmap v25: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:26:26.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:25 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:26:26.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:25 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:27.252 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:26:26 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:26:26.985Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:26:27.252 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:26:26 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:26:26.986Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 
192.168.123.1:53: no such host" 2026-03-10T13:26:28.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:27 vm00 ceph-mon[47364]: pgmap v26: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:26:28.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:27 vm00 ceph-mon[51670]: pgmap v26: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:26:28.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:27 vm08 ceph-mon[82639]: pgmap v26: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:26:30.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:29 vm00 ceph-mon[47364]: pgmap v27: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:26:30.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:29 vm00 ceph-mon[51670]: pgmap v27: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:26:30.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:29 vm08 ceph-mon[82639]: pgmap v27: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:26:32.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:31 vm00 ceph-mon[47364]: pgmap v28: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:26:32.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:31 vm00 ceph-mon[47364]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:26:32.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:31 vm00 ceph-mon[51670]: pgmap v28: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:26:32.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:31 vm00 ceph-mon[51670]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:26:32.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:31 vm08 ceph-mon[82639]: pgmap v28: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:26:32.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:31 vm08 ceph-mon[82639]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:26:34.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:33 vm00 ceph-mon[47364]: pgmap v29: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:26:34.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:33 vm00 ceph-mon[51670]: pgmap v29: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:26:34.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:33 vm08 ceph-mon[82639]: pgmap v29: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:26:35.980 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:26:35 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:26:35] "GET /metrics HTTP/1.1" 200 37555 "" "Prometheus/2.51.0" 2026-03-10T13:26:36.252 
INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:35 vm00 ceph-mon[47364]: pgmap v30: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:26:36.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:35 vm00 ceph-mon[51670]: pgmap v30: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:26:36.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:35 vm08 ceph-mon[82639]: pgmap v30: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:26:37.252 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:26:36 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:26:36.985Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:26:37.252 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:26:36 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:26:36.986Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:26:38.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:38 vm08 ceph-mon[82639]: pgmap v31: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:26:38.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:38 vm00 ceph-mon[47364]: pgmap v31: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:26:38.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:38 vm00 ceph-mon[51670]: pgmap v31: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:26:40.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:40 vm00 ceph-mon[47364]: pgmap v32: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:26:40.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:40 vm00 ceph-mon[51670]: pgmap v32: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:26:40.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:40 vm08 ceph-mon[82639]: pgmap v32: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:26:41.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:41 vm00 ceph-mon[47364]: pgmap v33: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:26:41.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:41 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:26:41.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:41 vm00 ceph-mon[47364]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 
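Note: the recurring alertmanager dispatcher errors above ("Notify for alerts failed ... lookup host.containers.internal on 192.168.123.1:53: no such host") come from the ceph-dashboard webhook receiver: alertmanager.a runs in a podman container and tries to post to the dashboard via host.containers.internal, a name the lab resolver at 192.168.123.1 does not know. The failure repeats about every ten seconds through this part of the log and is independent of the upgrade assertions the test is making. A quick, hypothetical check of the missing name from the host (not part of the test output) would be something like:

    # hypothetical manual check, run on vm00 outside the test harness
    getent hosts host.containers.internal || echo "host.containers.internal: no such host"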
2026-03-10T13:26:41.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:41 vm00 ceph-mon[51670]: pgmap v33: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:26:41.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:41 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:26:41.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:41 vm00 ceph-mon[51670]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:26:41.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:41 vm08 ceph-mon[82639]: pgmap v33: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:26:41.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:41 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:26:41.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:41 vm08 ceph-mon[82639]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:26:43.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:43 vm00 ceph-mon[47364]: pgmap v34: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:26:43.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:43 vm00 ceph-mon[51670]: pgmap v34: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:26:43.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:43 vm08 ceph-mon[82639]: pgmap v34: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:26:46.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:45 vm00 ceph-mon[47364]: pgmap v35: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:26:46.002 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:26:45 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:26:45] "GET /metrics HTTP/1.1" 200 37557 "" "Prometheus/2.51.0" 2026-03-10T13:26:46.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:45 vm00 ceph-mon[51670]: pgmap v35: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:26:46.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:45 vm08 ceph-mon[82639]: pgmap v35: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:26:47.253 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:26:46 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:26:46.986Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:26:47.253 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:26:46 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:26:46.986Z caller=notify.go:732 
level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:26:47.460 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch ps' 2026-03-10T13:26:48.034 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:47 vm00 ceph-mon[47364]: pgmap v36: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:26:48.039 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:47 vm00 ceph-mon[51670]: pgmap v36: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:26:48.069 INFO:teuthology.orchestra.run.vm00.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T13:26:48.069 INFO:teuthology.orchestra.run.vm00.stdout:alertmanager.a vm00 *:9093,9094 running (13m) 48s ago 19m 26.4M - 0.25.0 c8568f914cd2 12fde3cf83cb 2026-03-10T13:26:48.070 INFO:teuthology.orchestra.run.vm00.stdout:grafana.a vm08 *:3000 running (50s) 28s ago 19m 73.3M - 10.4.0 c8b91775d855 960e32589e98 2026-03-10T13:26:48.070 INFO:teuthology.orchestra.run.vm00.stdout:iscsi.foo.vm00.dezodo vm00 running (57s) 48s ago 19m 43.0M - 3.5 e1d6a67b021e 630bf6d4e7f3 2026-03-10T13:26:48.070 INFO:teuthology.orchestra.run.vm00.stdout:mgr.x vm08 *:8443,9283,8765 running (61s) 28s ago 21m 489M - 19.2.3-678-ge911bdeb 654f31e6858e 31b91eebc856 2026-03-10T13:26:48.070 INFO:teuthology.orchestra.run.vm00.stdout:mgr.y vm00 *:8443,9283,8765 running (7m) 48s ago 22m 556M - 19.2.3-678-ge911bdeb 654f31e6858e 5bc576d4d32b 2026-03-10T13:26:48.070 INFO:teuthology.orchestra.run.vm00.stdout:mon.a vm00 running (22m) 48s ago 22m 77.3M 2048M 17.2.0 e1d6a67b021e f0e3f322471c 2026-03-10T13:26:48.070 INFO:teuthology.orchestra.run.vm00.stdout:mon.b vm08 running (30s) 28s ago 21m 20.4M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 8cceb678a9ee 2026-03-10T13:26:48.070 INFO:teuthology.orchestra.run.vm00.stdout:mon.c vm00 running (21m) 48s ago 21m 56.2M 2048M 17.2.0 e1d6a67b021e d00b7fd44c23 2026-03-10T13:26:48.070 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.a vm00 *:9100 running (13m) 48s ago 19m 10.4M - 1.7.0 72c9c2088986 bcf883401619 2026-03-10T13:26:48.070 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.b vm08 *:9100 running (13m) 28s ago 19m 10.0M - 1.7.0 72c9c2088986 4ac83f03f818 2026-03-10T13:26:48.070 INFO:teuthology.orchestra.run.vm00.stdout:osd.0 vm00 running (21m) 48s ago 21m 52.9M 4096M 17.2.0 e1d6a67b021e 2919c7073fa7 2026-03-10T13:26:48.070 INFO:teuthology.orchestra.run.vm00.stdout:osd.1 vm00 running (21m) 48s ago 21m 57.2M 4096M 17.2.0 e1d6a67b021e 647927dc41ea 2026-03-10T13:26:48.070 INFO:teuthology.orchestra.run.vm00.stdout:osd.2 vm00 running (21m) 48s ago 21m 55.1M 4096M 17.2.0 e1d6a67b021e 1e417e82c2b9 2026-03-10T13:26:48.070 INFO:teuthology.orchestra.run.vm00.stdout:osd.3 vm00 running (20m) 48s ago 20m 53.3M 4096M 17.2.0 e1d6a67b021e 98bb6d7ea69f 2026-03-10T13:26:48.070 INFO:teuthology.orchestra.run.vm00.stdout:osd.4 vm08 running (20m) 28s ago 20m 54.1M 4096M 17.2.0 e1d6a67b021e e349440ca776 2026-03-10T13:26:48.070 
INFO:teuthology.orchestra.run.vm00.stdout:osd.5 vm08 running (20m) 28s ago 20m 56.3M 4096M 17.2.0 e1d6a67b021e 9a6a2a67cc99 2026-03-10T13:26:48.070 INFO:teuthology.orchestra.run.vm00.stdout:osd.6 vm08 running (20m) 28s ago 20m 52.1M 4096M 17.2.0 e1d6a67b021e c27676916d52 2026-03-10T13:26:48.070 INFO:teuthology.orchestra.run.vm00.stdout:osd.7 vm08 running (20m) 28s ago 20m 54.0M 4096M 17.2.0 e1d6a67b021e bf67951990a5 2026-03-10T13:26:48.070 INFO:teuthology.orchestra.run.vm00.stdout:prometheus.a vm08 *:9095 running (63s) 28s ago 19m 48.1M - 2.51.0 1d3b7f56885b 3f9b2d0821c9 2026-03-10T13:26:48.070 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm00.tvlvzo vm00 *:8000 running (19m) 48s ago 19m 96.8M - 17.2.0 e1d6a67b021e be24eac16807 2026-03-10T13:26:48.070 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm08.ljayps vm08 *:8000 running (19m) 28s ago 19m 93.9M - 17.2.0 e1d6a67b021e 67b525427823 2026-03-10T13:26:48.147 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph versions | jq -e '"'"'.mon | length == 2'"'"'' 2026-03-10T13:26:48.220 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:47 vm08 ceph-mon[82639]: pgmap v36: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:26:48.697 INFO:teuthology.orchestra.run.vm00.stdout:true 2026-03-10T13:26:48.757 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch upgrade status' 2026-03-10T13:26:48.949 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:48 vm00 ceph-mon[47364]: from='client.25198 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:26:48.949 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:48 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/2158092374' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:48.950 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:48 vm00 ceph-mon[51670]: from='client.25198 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:26:48.950 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:48 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/2158092374' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:49.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:48 vm08 ceph-mon[82639]: from='client.25198 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:26:49.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:48 vm08 ceph-mon[82639]: from='client.? 
192.168.123.100:0/2158092374' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:49.266 INFO:teuthology.orchestra.run.vm00.stdout:{ 2026-03-10T13:26:49.266 INFO:teuthology.orchestra.run.vm00.stdout: "target_image": null, 2026-03-10T13:26:49.266 INFO:teuthology.orchestra.run.vm00.stdout: "in_progress": false, 2026-03-10T13:26:49.266 INFO:teuthology.orchestra.run.vm00.stdout: "which": "", 2026-03-10T13:26:49.266 INFO:teuthology.orchestra.run.vm00.stdout: "services_complete": [], 2026-03-10T13:26:49.266 INFO:teuthology.orchestra.run.vm00.stdout: "progress": null, 2026-03-10T13:26:49.266 INFO:teuthology.orchestra.run.vm00.stdout: "message": "", 2026-03-10T13:26:49.266 INFO:teuthology.orchestra.run.vm00.stdout: "is_paused": false 2026-03-10T13:26:49.267 INFO:teuthology.orchestra.run.vm00.stdout:} 2026-03-10T13:26:49.320 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph health detail' 2026-03-10T13:26:49.852 INFO:teuthology.orchestra.run.vm00.stdout:HEALTH_OK 2026-03-10T13:26:49.903 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types mon --hosts $(ceph orch ps | grep mgr.y | awk '"'"'{print $2}'"'"')' 2026-03-10T13:26:50.107 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:49 vm00 ceph-mon[51670]: from='client.15324 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:26:50.107 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:49 vm00 ceph-mon[51670]: pgmap v37: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:26:50.107 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:49 vm00 ceph-mon[47364]: from='client.15324 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:26:50.107 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:49 vm00 ceph-mon[47364]: pgmap v37: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:26:50.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:49 vm08 ceph-mon[82639]: from='client.15324 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:26:50.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:49 vm08 ceph-mon[82639]: pgmap v37: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:26:50.990 INFO:teuthology.orchestra.run.vm00.stdout:Initiating upgrade to quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:26:51.052 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'while ceph orch upgrade status | jq '"'"'.in_progress'"'"' | grep true && ! 
ceph orch upgrade status | jq '"'"'.message'"'"' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done' 2026-03-10T13:26:51.231 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:50 vm00 ceph-mon[47364]: from='client.25213 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:26:51.231 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:50 vm00 ceph-mon[47364]: from='client.? 192.168.123.100:0/3253025815' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T13:26:51.231 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:50 vm00 ceph-mon[51670]: from='client.25213 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:26:51.231 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:50 vm00 ceph-mon[51670]: from='client.? 192.168.123.100:0/3253025815' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T13:26:51.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:50 vm08 ceph-mon[82639]: from='client.25213 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:26:51.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:50 vm08 ceph-mon[82639]: from='client.? 192.168.123.100:0/3253025815' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T13:26:51.666 INFO:teuthology.orchestra.run.vm00.stdout:true 2026-03-10T13:26:52.175 INFO:teuthology.orchestra.run.vm00.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T13:26:52.175 INFO:teuthology.orchestra.run.vm00.stdout:alertmanager.a vm00 *:9093,9094 running (13m) 52s ago 19m 26.4M - 0.25.0 c8568f914cd2 12fde3cf83cb 2026-03-10T13:26:52.175 INFO:teuthology.orchestra.run.vm00.stdout:grafana.a vm08 *:3000 running (54s) 32s ago 19m 73.3M - 10.4.0 c8b91775d855 960e32589e98 2026-03-10T13:26:52.175 INFO:teuthology.orchestra.run.vm00.stdout:iscsi.foo.vm00.dezodo vm00 running (61s) 52s ago 19m 43.0M - 3.5 e1d6a67b021e 630bf6d4e7f3 2026-03-10T13:26:52.175 INFO:teuthology.orchestra.run.vm00.stdout:mgr.x vm08 *:8443,9283,8765 running (65s) 32s ago 21m 489M - 19.2.3-678-ge911bdeb 654f31e6858e 31b91eebc856 2026-03-10T13:26:52.175 INFO:teuthology.orchestra.run.vm00.stdout:mgr.y vm00 *:8443,9283,8765 running (7m) 52s ago 22m 556M - 19.2.3-678-ge911bdeb 654f31e6858e 5bc576d4d32b 2026-03-10T13:26:52.175 INFO:teuthology.orchestra.run.vm00.stdout:mon.a vm00 running (22m) 52s ago 22m 77.3M 2048M 17.2.0 e1d6a67b021e f0e3f322471c 2026-03-10T13:26:52.175 INFO:teuthology.orchestra.run.vm00.stdout:mon.b vm08 running (34s) 32s ago 21m 20.4M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 8cceb678a9ee 2026-03-10T13:26:52.175 INFO:teuthology.orchestra.run.vm00.stdout:mon.c vm00 running (21m) 52s ago 21m 56.2M 2048M 17.2.0 e1d6a67b021e d00b7fd44c23 2026-03-10T13:26:52.176 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.a vm00 *:9100 running (13m) 52s ago 20m 10.4M - 1.7.0 72c9c2088986 bcf883401619 2026-03-10T13:26:52.176 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.b vm08 *:9100 running (13m) 32s ago 20m 10.0M - 1.7.0 72c9c2088986 4ac83f03f818 2026-03-10T13:26:52.176 INFO:teuthology.orchestra.run.vm00.stdout:osd.0 vm00 running (21m) 52s ago 21m 52.9M 4096M 17.2.0 e1d6a67b021e 2919c7073fa7 2026-03-10T13:26:52.176 INFO:teuthology.orchestra.run.vm00.stdout:osd.1 vm00 running (21m) 52s ago 
21m 57.2M 4096M 17.2.0 e1d6a67b021e 647927dc41ea 2026-03-10T13:26:52.176 INFO:teuthology.orchestra.run.vm00.stdout:osd.2 vm00 running (21m) 52s ago 21m 55.1M 4096M 17.2.0 e1d6a67b021e 1e417e82c2b9 2026-03-10T13:26:52.176 INFO:teuthology.orchestra.run.vm00.stdout:osd.3 vm00 running (20m) 52s ago 20m 53.3M 4096M 17.2.0 e1d6a67b021e 98bb6d7ea69f 2026-03-10T13:26:52.176 INFO:teuthology.orchestra.run.vm00.stdout:osd.4 vm08 running (20m) 32s ago 20m 54.1M 4096M 17.2.0 e1d6a67b021e e349440ca776 2026-03-10T13:26:52.176 INFO:teuthology.orchestra.run.vm00.stdout:osd.5 vm08 running (20m) 32s ago 20m 56.3M 4096M 17.2.0 e1d6a67b021e 9a6a2a67cc99 2026-03-10T13:26:52.176 INFO:teuthology.orchestra.run.vm00.stdout:osd.6 vm08 running (20m) 32s ago 20m 52.1M 4096M 17.2.0 e1d6a67b021e c27676916d52 2026-03-10T13:26:52.176 INFO:teuthology.orchestra.run.vm00.stdout:osd.7 vm08 running (20m) 32s ago 20m 54.0M 4096M 17.2.0 e1d6a67b021e bf67951990a5 2026-03-10T13:26:52.176 INFO:teuthology.orchestra.run.vm00.stdout:prometheus.a vm08 *:9095 running (67s) 32s ago 19m 48.1M - 2.51.0 1d3b7f56885b 3f9b2d0821c9 2026-03-10T13:26:52.176 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm00.tvlvzo vm00 *:8000 running (19m) 52s ago 19m 96.8M - 17.2.0 e1d6a67b021e be24eac16807 2026-03-10T13:26:52.176 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm08.ljayps vm08 *:8000 running (19m) 32s ago 19m 93.9M - 17.2.0 e1d6a67b021e 67b525427823 2026-03-10T13:26:52.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:51 vm00 ceph-mon[47364]: pgmap v38: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:26:52.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:51 vm00 ceph-mon[47364]: from='client.15348 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:26:52.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:51 vm00 ceph-mon[47364]: from='client.15354 -' entity='client.admin' cmd=[{"prefix": "orch upgrade start", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "daemon_types": "mon", "hosts": "vm00", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:26:52.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:51 vm00 ceph-mon[47364]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:26:52.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:51 vm00 ceph-mon[47364]: Upgrade: Started with target quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:26:52.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:51 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:52.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:51 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:26:52.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:51 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:26:52.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:51 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:52.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:51 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 
2026-03-10T13:26:52.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:51 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:26:52.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:51 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:52.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:51 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:52.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:51 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "quorum_status"}]: dispatch 2026-03-10T13:26:52.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:51 vm00 ceph-mon[47364]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "mon ok-to-stop", "ids": ["a"]}]: dispatch 2026-03-10T13:26:52.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:51 vm00 ceph-mon[51670]: pgmap v38: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:26:52.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:51 vm00 ceph-mon[51670]: from='client.15348 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:26:52.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:51 vm00 ceph-mon[51670]: from='client.15354 -' entity='client.admin' cmd=[{"prefix": "orch upgrade start", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "daemon_types": "mon", "hosts": "vm00", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:26:52.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:51 vm00 ceph-mon[51670]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:26:52.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:51 vm00 ceph-mon[51670]: Upgrade: Started with target quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:26:52.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:51 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:52.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:51 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:26:52.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:51 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:26:52.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:51 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:52.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:51 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:52.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:51 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:26:52.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:51 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 
2026-03-10T13:26:52.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:51 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:52.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:51 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "quorum_status"}]: dispatch 2026-03-10T13:26:52.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:51 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "mon ok-to-stop", "ids": ["a"]}]: dispatch 2026-03-10T13:26:52.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:51 vm08 ceph-mon[82639]: pgmap v38: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:26:52.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:51 vm08 ceph-mon[82639]: from='client.15348 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:26:52.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:51 vm08 ceph-mon[82639]: from='client.15354 -' entity='client.admin' cmd=[{"prefix": "orch upgrade start", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "daemon_types": "mon", "hosts": "vm00", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:26:52.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:51 vm08 ceph-mon[82639]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:26:52.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:51 vm08 ceph-mon[82639]: Upgrade: Started with target quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:26:52.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:51 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:52.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:51 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:26:52.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:51 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:26:52.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:51 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:52.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:51 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:52.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:51 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:26:52.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:51 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:26:52.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:51 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' 2026-03-10T13:26:52.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:51 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "quorum_status"}]: dispatch 2026-03-10T13:26:52.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 
13:26:51 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "mon ok-to-stop", "ids": ["a"]}]: dispatch
2026-03-10T13:26:52.473 INFO:teuthology.orchestra.run.vm00.stdout:{
2026-03-10T13:26:52.473 INFO:teuthology.orchestra.run.vm00.stdout:    "mon": {
2026-03-10T13:26:52.473 INFO:teuthology.orchestra.run.vm00.stdout:        "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 2,
2026-03-10T13:26:52.473 INFO:teuthology.orchestra.run.vm00.stdout:        "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 1
2026-03-10T13:26:52.473 INFO:teuthology.orchestra.run.vm00.stdout:    },
2026-03-10T13:26:52.473 INFO:teuthology.orchestra.run.vm00.stdout:    "mgr": {
2026-03-10T13:26:52.473 INFO:teuthology.orchestra.run.vm00.stdout:        "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2
2026-03-10T13:26:52.473 INFO:teuthology.orchestra.run.vm00.stdout:    },
2026-03-10T13:26:52.473 INFO:teuthology.orchestra.run.vm00.stdout:    "osd": {
2026-03-10T13:26:52.473 INFO:teuthology.orchestra.run.vm00.stdout:        "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 8
2026-03-10T13:26:52.473 INFO:teuthology.orchestra.run.vm00.stdout:    },
2026-03-10T13:26:52.473 INFO:teuthology.orchestra.run.vm00.stdout:    "mds": {},
2026-03-10T13:26:52.473 INFO:teuthology.orchestra.run.vm00.stdout:    "rgw": {
2026-03-10T13:26:52.473 INFO:teuthology.orchestra.run.vm00.stdout:        "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 2
2026-03-10T13:26:52.473 INFO:teuthology.orchestra.run.vm00.stdout:    },
2026-03-10T13:26:52.473 INFO:teuthology.orchestra.run.vm00.stdout:    "overall": {
2026-03-10T13:26:52.473 INFO:teuthology.orchestra.run.vm00.stdout:        "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 12,
2026-03-10T13:26:52.473 INFO:teuthology.orchestra.run.vm00.stdout:        "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 3
2026-03-10T13:26:52.473 INFO:teuthology.orchestra.run.vm00.stdout:    }
2026-03-10T13:26:52.473 INFO:teuthology.orchestra.run.vm00.stdout:}
2026-03-10T13:26:52.730 INFO:teuthology.orchestra.run.vm00.stdout:{
2026-03-10T13:26:52.731 INFO:teuthology.orchestra.run.vm00.stdout:    "target_image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df",
2026-03-10T13:26:52.731 INFO:teuthology.orchestra.run.vm00.stdout:    "in_progress": true,
2026-03-10T13:26:52.731 INFO:teuthology.orchestra.run.vm00.stdout:    "which": "Upgrading daemons of type(s) mon on host(s) vm00",
2026-03-10T13:26:52.731 INFO:teuthology.orchestra.run.vm00.stdout:    "services_complete": [],
2026-03-10T13:26:52.731 INFO:teuthology.orchestra.run.vm00.stdout:    "progress": "0/2 daemons upgraded",
2026-03-10T13:26:52.731 INFO:teuthology.orchestra.run.vm00.stdout:    "message": "Currently upgrading mon daemons",
2026-03-10T13:26:52.731 INFO:teuthology.orchestra.run.vm00.stdout:    "is_paused": false
2026-03-10T13:26:52.731 INFO:teuthology.orchestra.run.vm00.stdout:}
2026-03-10T13:26:52.926 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:52 vm00 systemd[1]: Stopping Ceph mon.a for 98a3dada-1c81-11f1-89c9-d57c120f78d5...
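[editor's note] The `ceph versions` dump above captures the cluster mid-way through the staggered upgrade: two mons and all eight OSDs still report 17.2.0 (quincy), while both mgrs and one mon already run 19.2.3 (squid). A minimal sketch for summarizing such a dump outside the test harness, stdlib only and reading the JSON from stdin; the script name is hypothetical and not part of this run:

```python
#!/usr/bin/env python3
# tally_versions.py (hypothetical helper): summarize `ceph versions` JSON.
# Usage sketch: ceph versions | python3 tally_versions.py
import json
import sys

versions = json.load(sys.stdin)
for daemon_type, releases in sorted(versions.items()):
    if daemon_type == "overall":
        continue  # the per-daemon sections already cover these counts
    for release, count in releases.items():
        print(f"{daemon_type:>4}: {count:>2} x {release}")

total = sum(versions.get("overall", {}).values())
print(f"total daemons reported: {total}")
```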
2026-03-10T13:26:52.926 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:52 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mon-a[47360]: 2026-03-10T13:26:52.708+0000 7f5d46c7e700 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-mon -n mon.a -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false (PID: 1) UID: 0 2026-03-10T13:26:52.926 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:52 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mon-a[47360]: 2026-03-10T13:26:52.708+0000 7f5d46c7e700 -1 mon.a@0(leader) e3 *** Got Signal Terminated *** 2026-03-10T13:26:52.926 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:26:52 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: [10/Mar/2026:13:26:52] ENGINE Bus STOPPING 2026-03-10T13:26:53.182 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:52 vm00 podman[94335]: 2026-03-10 13:26:52.925369196 +0000 UTC m=+0.236328126 container died f0e3f322471c0544872bb7f4e33501eee547f6caedbfb59fb6b15f69a2ebb80b (image=quay.io/ceph/ceph:v17.2.0, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mon-a, maintainer=Guillaume Abrioux , GIT_BRANCH=HEAD, vcs-type=git, com.redhat.component=centos-stream-container, vendor=Red Hat, Inc., description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, io.buildah.version=1.19.8, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, GIT_CLEAN=True, io.k8s.display-name=CentOS Stream 8, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, version=8, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, distribution-scope=public, release=754, GIT_REPO=https://github.com/ceph/ceph-container.git, io.openshift.tags=base centos centos-stream, name=centos-stream, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, io.openshift.expose-services=, architecture=x86_64, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, build-date=2022-05-03T08:36:31.336870, CEPH_POINT_RELEASE=-17.2.0, ceph=True, RELEASE=HEAD) 2026-03-10T13:26:53.182 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:52 vm00 podman[94335]: 2026-03-10 13:26:52.942426056 +0000 UTC m=+0.253384986 container remove f0e3f322471c0544872bb7f4e33501eee547f6caedbfb59fb6b15f69a2ebb80b (image=quay.io/ceph/ceph:v17.2.0, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mon-a, GIT_BRANCH=HEAD, io.openshift.expose-services=, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, vendor=Red Hat, Inc., GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, distribution-scope=public, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. 
This image takes the Red Hat UBI and layers on content from CentOS Stream, maintainer=Guillaume Abrioux , release=754, GIT_CLEAN=True, ceph=True, version=8, GIT_REPO=https://github.com/ceph/ceph-container.git, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., CEPH_POINT_RELEASE=-17.2.0, name=centos-stream, build-date=2022-05-03T08:36:31.336870, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, io.openshift.tags=base centos centos-stream, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, com.redhat.component=centos-stream-container, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, RELEASE=HEAD, architecture=x86_64, io.k8s.display-name=CentOS Stream 8, io.buildah.version=1.19.8, vcs-type=git) 2026-03-10T13:26:53.182 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:52 vm00 bash[94335]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mon-a 2026-03-10T13:26:53.182 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 systemd[1]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@mon.a.service: Deactivated successfully. 2026-03-10T13:26:53.182 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 systemd[1]: Stopped Ceph mon.a for 98a3dada-1c81-11f1-89c9-d57c120f78d5. 2026-03-10T13:26:53.182 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 systemd[1]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@mon.a.service: Consumed 15.332s CPU time. 2026-03-10T13:26:53.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 systemd[1]: Starting Ceph mon.a for 98a3dada-1c81-11f1-89c9-d57c120f78d5... 
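[editor's note] The journal entries above and below show cephadm removing the v17.2.0 mon.a container and recreating it from the quay.ceph.io/ceph-ci image. A minimal sketch, assuming shell access to the host and podman as the container runtime (as in this run), for confirming which image the redeployed daemon container was started from; the container name is copied verbatim from the log:

```python
import subprocess

# Container name exactly as it appears in the journal entries above.
NAME = "ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mon-a"

# Ask podman which image the (re)created container was started from.
result = subprocess.run(
    ["podman", "inspect", "--format", "{{.ImageName}}", NAME],
    capture_output=True, text=True, check=True,
)
print(result.stdout.strip())
```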
2026-03-10T13:26:53.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 podman[94455]: 2026-03-10 13:26:53.294291856 +0000 UTC m=+0.022888179 container create 981df6371890f478c2678e0424f375e239072c3793548f2c3a6d2db61ac47a21 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mon-a, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_REF=squid, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.schema-version=1.0, ceph=True, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-10T13:26:53.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 podman[94455]: 2026-03-10 13:26:53.336470935 +0000 UTC m=+0.065067258 container init 981df6371890f478c2678e0424f375e239072c3793548f2c3a6d2db61ac47a21 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mon-a, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, CEPH_REF=squid, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.build-date=20260223) 2026-03-10T13:26:53.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 podman[94455]: 2026-03-10 13:26:53.340078417 +0000 UTC m=+0.068674740 container start 981df6371890f478c2678e0424f375e239072c3793548f2c3a6d2db61ac47a21 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mon-a, ceph=True, io.buildah.version=1.41.3, CEPH_REF=squid, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default) 2026-03-10T13:26:53.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 bash[94455]: 981df6371890f478c2678e0424f375e239072c3793548f2c3a6d2db61ac47a21 2026-03-10T13:26:53.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 podman[94455]: 2026-03-10 13:26:53.28496173 +0000 UTC m=+0.013558053 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:26:53.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 systemd[1]: Started 
Ceph mon.a for 98a3dada-1c81-11f1-89c9-d57c120f78d5. 2026-03-10T13:26:53.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: set uid:gid to 167:167 (ceph:ceph) 2026-03-10T13:26:53.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable), process ceph-mon, pid 2 2026-03-10T13:26:53.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: pidfile_write: ignore empty --pid-file 2026-03-10T13:26:53.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: load: jerasure load: lrc 2026-03-10T13:26:53.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: RocksDB version: 7.9.2 2026-03-10T13:26:53.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Git sha 0 2026-03-10T13:26:53.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Compile date 2026-02-25 18:11:04 2026-03-10T13:26:53.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: DB SUMMARY 2026-03-10T13:26:53.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: DB Session ID: 7TEOS7B2UP6NQIQ4B1A5 2026-03-10T13:26:53.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: CURRENT file: CURRENT 2026-03-10T13:26:53.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: IDENTITY file: IDENTITY 2026-03-10T13:26:53.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: MANIFEST file: MANIFEST-000015 size: 2144 Bytes 2026-03-10T13:26:53.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: SST files in /var/lib/ceph/mon/ceph-a/store.db dir, Total Num: 1, files: 000048.sst 2026-03-10T13:26:53.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Write Ahead Log file in /var/lib/ceph/mon/ceph-a/store.db: 000046.log size: 847806 ; 2026-03-10T13:26:53.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.error_if_exists: 0 2026-03-10T13:26:53.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.create_if_missing: 0 2026-03-10T13:26:53.505 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.paranoid_checks: 1 2026-03-10T13:26:53.505 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.flush_verify_memtable_count: 1 2026-03-10T13:26:53.505 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.track_and_verify_wals_in_manifest: 0 2026-03-10T13:26:53.505 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.verify_sst_unique_id_in_manifest: 1 2026-03-10T13:26:53.505 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.env: 0x55d9116d9dc0 2026-03-10T13:26:53.505 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.fs: PosixFileSystem 2026-03-10T13:26:53.505 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.info_log: 0x55d913d9f7e0 2026-03-10T13:26:53.505 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: 
Options.max_file_opening_threads: 16 2026-03-10T13:26:53.505 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.statistics: (nil) 2026-03-10T13:26:53.505 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.use_fsync: 0 2026-03-10T13:26:53.505 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.max_log_file_size: 0 2026-03-10T13:26:53.505 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.max_manifest_file_size: 1073741824 2026-03-10T13:26:53.505 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.log_file_time_to_roll: 0 2026-03-10T13:26:53.505 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.keep_log_file_num: 1000 2026-03-10T13:26:53.505 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.recycle_log_file_num: 0 2026-03-10T13:26:53.505 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.allow_fallocate: 1 2026-03-10T13:26:53.505 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.allow_mmap_reads: 0 2026-03-10T13:26:53.505 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.allow_mmap_writes: 0 2026-03-10T13:26:53.505 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.use_direct_reads: 0 2026-03-10T13:26:53.505 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.use_direct_io_for_flush_and_compaction: 0 2026-03-10T13:26:53.505 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.create_missing_column_families: 0 2026-03-10T13:26:53.505 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.db_log_dir: 2026-03-10T13:26:53.505 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.wal_dir: 2026-03-10T13:26:53.505 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.table_cache_numshardbits: 6 2026-03-10T13:26:53.505 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.WAL_ttl_seconds: 0 2026-03-10T13:26:53.505 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.WAL_size_limit_MB: 0 2026-03-10T13:26:53.505 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.max_write_batch_group_size_bytes: 1048576 2026-03-10T13:26:53.505 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.manifest_preallocation_size: 4194304 2026-03-10T13:26:53.505 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.is_fd_close_on_exec: 1 2026-03-10T13:26:53.505 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.advise_random_on_open: 1 2026-03-10T13:26:53.505 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.db_write_buffer_size: 0 2026-03-10T13:26:53.505 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.write_buffer_manager: 0x55d913da3900 2026-03-10T13:26:53.505 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 
ceph-mon[94470]: rocksdb: Options.access_hint_on_compaction_start: 1 2026-03-10T13:26:53.505 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.random_access_max_buffer_size: 1048576 2026-03-10T13:26:53.505 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.use_adaptive_mutex: 0 2026-03-10T13:26:53.505 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.rate_limiter: (nil) 2026-03-10T13:26:53.505 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.sst_file_manager.rate_bytes_per_sec: 0 2026-03-10T13:26:53.505 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.wal_recovery_mode: 2 2026-03-10T13:26:53.505 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.enable_thread_tracking: 0 2026-03-10T13:26:53.505 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.enable_pipelined_write: 0 2026-03-10T13:26:53.505 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.unordered_write: 0 2026-03-10T13:26:53.505 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.allow_concurrent_memtable_write: 1 2026-03-10T13:26:53.506 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.enable_write_thread_adaptive_yield: 1 2026-03-10T13:26:53.506 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.write_thread_max_yield_usec: 100 2026-03-10T13:26:53.506 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.write_thread_slow_yield_usec: 3 2026-03-10T13:26:53.506 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.row_cache: None 2026-03-10T13:26:53.506 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.wal_filter: None 2026-03-10T13:26:53.506 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.avoid_flush_during_recovery: 0 2026-03-10T13:26:53.506 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.allow_ingest_behind: 0 2026-03-10T13:26:53.506 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.two_write_queues: 0 2026-03-10T13:26:53.506 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.manual_wal_flush: 0 2026-03-10T13:26:53.506 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.wal_compression: 0 2026-03-10T13:26:53.506 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.atomic_flush: 0 2026-03-10T13:26:53.506 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.avoid_unnecessary_blocking_io: 0 2026-03-10T13:26:53.506 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.persist_stats_to_disk: 0 2026-03-10T13:26:53.506 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.write_dbid_to_manifest: 0 2026-03-10T13:26:53.506 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.log_readahead_size: 0 2026-03-10T13:26:53.506 
INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.file_checksum_gen_factory: Unknown 2026-03-10T13:26:53.506 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.best_efforts_recovery: 0 2026-03-10T13:26:53.506 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.max_bgerror_resume_count: 2147483647 2026-03-10T13:26:53.506 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.bgerror_resume_retry_interval: 1000000 2026-03-10T13:26:53.506 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.allow_data_in_errors: 0 2026-03-10T13:26:53.506 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.db_host_id: __hostname__ 2026-03-10T13:26:53.506 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.enforce_single_del_contracts: true 2026-03-10T13:26:53.506 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.max_background_jobs: 2 2026-03-10T13:26:53.506 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.max_background_compactions: -1 2026-03-10T13:26:53.506 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.max_subcompactions: 1 2026-03-10T13:26:53.506 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.avoid_flush_during_shutdown: 0 2026-03-10T13:26:53.506 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.writable_file_max_buffer_size: 1048576 2026-03-10T13:26:53.506 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.delayed_write_rate : 16777216 2026-03-10T13:26:53.506 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.max_total_wal_size: 0 2026-03-10T13:26:53.506 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.delete_obsolete_files_period_micros: 21600000000 2026-03-10T13:26:53.506 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.stats_dump_period_sec: 600 2026-03-10T13:26:53.506 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.stats_persist_period_sec: 600 2026-03-10T13:26:53.506 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.stats_history_buffer_size: 1048576 2026-03-10T13:26:53.506 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.max_open_files: -1 2026-03-10T13:26:53.506 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.bytes_per_sync: 0 2026-03-10T13:26:53.506 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.wal_bytes_per_sync: 0 2026-03-10T13:26:53.506 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.strict_bytes_per_sync: 0 2026-03-10T13:26:53.506 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.compaction_readahead_size: 0 2026-03-10T13:26:53.506 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.max_background_flushes: -1 2026-03-10T13:26:53.506 
INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Compression algorithms supported: 2026-03-10T13:26:53.506 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: kZSTD supported: 0 2026-03-10T13:26:53.506 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: kXpressCompression supported: 0 2026-03-10T13:26:53.507 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: kBZip2Compression supported: 0 2026-03-10T13:26:53.507 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: kZSTDNotFinalCompression supported: 0 2026-03-10T13:26:53.507 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: kLZ4Compression supported: 1 2026-03-10T13:26:53.507 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: kZlibCompression supported: 1 2026-03-10T13:26:53.507 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: kLZ4HCCompression supported: 1 2026-03-10T13:26:53.507 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: kSnappyCompression supported: 1 2026-03-10T13:26:53.507 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Fast CRC32 supported: Supported on x86 2026-03-10T13:26:53.507 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: DMutex implementation: pthread_mutex_t 2026-03-10T13:26:53.507 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: [db/version_set.cc:5527] Recovering from manifest file: /var/lib/ceph/mon/ceph-a/store.db/MANIFEST-000015 2026-03-10T13:26:53.507 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [default]: 2026-03-10T13:26:53.507 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.comparator: leveldb.BytewiseComparator 2026-03-10T13:26:53.507 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.merge_operator: 2026-03-10T13:26:53.507 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.compaction_filter: None 2026-03-10T13:26:53.507 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.compaction_filter_factory: None 2026-03-10T13:26:53.507 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.sst_partitioner_factory: None 2026-03-10T13:26:53.507 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.memtable_factory: SkipListFactory 2026-03-10T13:26:53.507 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.table_factory: BlockBasedTable 2026-03-10T13:26:53.507 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: table_factory options: flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x55d913d9f440) 2026-03-10T13:26:53.507 INFO:journalctl@ceph.mon.a.vm00.stdout: cache_index_and_filter_blocks: 1 2026-03-10T13:26:53.507 INFO:journalctl@ceph.mon.a.vm00.stdout: cache_index_and_filter_blocks_with_high_priority: 0 2026-03-10T13:26:53.507 INFO:journalctl@ceph.mon.a.vm00.stdout: pin_l0_filter_and_index_blocks_in_cache: 0 2026-03-10T13:26:53.507 
INFO:journalctl@ceph.mon.a.vm00.stdout: pin_top_level_index_and_filter: 1 2026-03-10T13:26:53.507 INFO:journalctl@ceph.mon.a.vm00.stdout: index_type: 0 2026-03-10T13:26:53.507 INFO:journalctl@ceph.mon.a.vm00.stdout: data_block_index_type: 0 2026-03-10T13:26:53.507 INFO:journalctl@ceph.mon.a.vm00.stdout: index_shortening: 1 2026-03-10T13:26:53.507 INFO:journalctl@ceph.mon.a.vm00.stdout: data_block_hash_table_util_ratio: 0.750000 2026-03-10T13:26:53.507 INFO:journalctl@ceph.mon.a.vm00.stdout: checksum: 4 2026-03-10T13:26:53.507 INFO:journalctl@ceph.mon.a.vm00.stdout: no_block_cache: 0 2026-03-10T13:26:53.507 INFO:journalctl@ceph.mon.a.vm00.stdout: block_cache: 0x55d913dc29b0 2026-03-10T13:26:53.507 INFO:journalctl@ceph.mon.a.vm00.stdout: block_cache_name: BinnedLRUCache 2026-03-10T13:26:53.507 INFO:journalctl@ceph.mon.a.vm00.stdout: block_cache_options: 2026-03-10T13:26:53.507 INFO:journalctl@ceph.mon.a.vm00.stdout: capacity : 536870912 2026-03-10T13:26:53.507 INFO:journalctl@ceph.mon.a.vm00.stdout: num_shard_bits : 4 2026-03-10T13:26:53.507 INFO:journalctl@ceph.mon.a.vm00.stdout: strict_capacity_limit : 0 2026-03-10T13:26:53.507 INFO:journalctl@ceph.mon.a.vm00.stdout: high_pri_pool_ratio: 0.000 2026-03-10T13:26:53.507 INFO:journalctl@ceph.mon.a.vm00.stdout: block_cache_compressed: (nil) 2026-03-10T13:26:53.507 INFO:journalctl@ceph.mon.a.vm00.stdout: persistent_cache: (nil) 2026-03-10T13:26:53.507 INFO:journalctl@ceph.mon.a.vm00.stdout: block_size: 4096 2026-03-10T13:26:53.507 INFO:journalctl@ceph.mon.a.vm00.stdout: block_size_deviation: 10 2026-03-10T13:26:53.507 INFO:journalctl@ceph.mon.a.vm00.stdout: block_restart_interval: 16 2026-03-10T13:26:53.508 INFO:journalctl@ceph.mon.a.vm00.stdout: index_block_restart_interval: 1 2026-03-10T13:26:53.508 INFO:journalctl@ceph.mon.a.vm00.stdout: metadata_block_size: 4096 2026-03-10T13:26:53.508 INFO:journalctl@ceph.mon.a.vm00.stdout: partition_filters: 0 2026-03-10T13:26:53.508 INFO:journalctl@ceph.mon.a.vm00.stdout: use_delta_encoding: 1 2026-03-10T13:26:53.508 INFO:journalctl@ceph.mon.a.vm00.stdout: filter_policy: bloomfilter 2026-03-10T13:26:53.508 INFO:journalctl@ceph.mon.a.vm00.stdout: whole_key_filtering: 1 2026-03-10T13:26:53.508 INFO:journalctl@ceph.mon.a.vm00.stdout: verify_compression: 0 2026-03-10T13:26:53.508 INFO:journalctl@ceph.mon.a.vm00.stdout: read_amp_bytes_per_bit: 0 2026-03-10T13:26:53.508 INFO:journalctl@ceph.mon.a.vm00.stdout: format_version: 5 2026-03-10T13:26:53.508 INFO:journalctl@ceph.mon.a.vm00.stdout: enable_index_compression: 1 2026-03-10T13:26:53.508 INFO:journalctl@ceph.mon.a.vm00.stdout: block_align: 0 2026-03-10T13:26:53.508 INFO:journalctl@ceph.mon.a.vm00.stdout: max_auto_readahead_size: 262144 2026-03-10T13:26:53.508 INFO:journalctl@ceph.mon.a.vm00.stdout: prepopulate_block_cache: 0 2026-03-10T13:26:53.508 INFO:journalctl@ceph.mon.a.vm00.stdout: initial_auto_readahead_size: 8192 2026-03-10T13:26:53.508 INFO:journalctl@ceph.mon.a.vm00.stdout: num_file_reads_for_auto_readahead: 2 2026-03-10T13:26:53.508 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.write_buffer_size: 33554432 2026-03-10T13:26:53.508 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.max_write_buffer_number: 2 2026-03-10T13:26:53.508 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.compression: NoCompression 2026-03-10T13:26:53.508 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: 
Options.bottommost_compression: Disabled 2026-03-10T13:26:53.508 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.prefix_extractor: nullptr 2026-03-10T13:26:53.508 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.memtable_insert_with_hint_prefix_extractor: nullptr 2026-03-10T13:26:53.508 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.num_levels: 7 2026-03-10T13:26:53.508 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.min_write_buffer_number_to_merge: 1 2026-03-10T13:26:53.508 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.max_write_buffer_number_to_maintain: 0 2026-03-10T13:26:53.508 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.max_write_buffer_size_to_maintain: 0 2026-03-10T13:26:53.508 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.bottommost_compression_opts.window_bits: -14 2026-03-10T13:26:53.508 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.bottommost_compression_opts.level: 32767 2026-03-10T13:26:53.508 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.bottommost_compression_opts.strategy: 0 2026-03-10T13:26:53.508 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.bottommost_compression_opts.max_dict_bytes: 0 2026-03-10T13:26:53.508 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.bottommost_compression_opts.zstd_max_train_bytes: 0 2026-03-10T13:26:53.508 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.bottommost_compression_opts.parallel_threads: 1 2026-03-10T13:26:53.508 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.bottommost_compression_opts.enabled: false 2026-03-10T13:26:53.508 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.bottommost_compression_opts.max_dict_buffer_bytes: 0 2026-03-10T13:26:53.508 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.bottommost_compression_opts.use_zstd_dict_trainer: true 2026-03-10T13:26:53.508 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.compression_opts.window_bits: -14 2026-03-10T13:26:53.508 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.compression_opts.level: 32767 2026-03-10T13:26:53.508 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.compression_opts.strategy: 0 2026-03-10T13:26:53.508 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.compression_opts.max_dict_bytes: 0 2026-03-10T13:26:53.508 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.compression_opts.zstd_max_train_bytes: 0 2026-03-10T13:26:53.508 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.compression_opts.use_zstd_dict_trainer: true 2026-03-10T13:26:53.509 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.compression_opts.parallel_threads: 1 2026-03-10T13:26:53.509 
INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.compression_opts.enabled: false 2026-03-10T13:26:53.509 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.compression_opts.max_dict_buffer_bytes: 0 2026-03-10T13:26:53.509 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.level0_file_num_compaction_trigger: 4 2026-03-10T13:26:53.509 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.level0_slowdown_writes_trigger: 20 2026-03-10T13:26:53.509 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.level0_stop_writes_trigger: 36 2026-03-10T13:26:53.509 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.target_file_size_base: 67108864 2026-03-10T13:26:53.509 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.target_file_size_multiplier: 1 2026-03-10T13:26:53.509 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.max_bytes_for_level_base: 268435456 2026-03-10T13:26:53.509 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.level_compaction_dynamic_level_bytes: 1 2026-03-10T13:26:53.509 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.max_bytes_for_level_multiplier: 10.000000 2026-03-10T13:26:53.509 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1 2026-03-10T13:26:53.509 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1 2026-03-10T13:26:53.509 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1 2026-03-10T13:26:53.509 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1 2026-03-10T13:26:53.509 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1 2026-03-10T13:26:53.509 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1 2026-03-10T13:26:53.509 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1 2026-03-10T13:26:53.509 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.max_sequential_skip_in_iterations: 8 2026-03-10T13:26:53.509 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.max_compaction_bytes: 1677721600 2026-03-10T13:26:53.509 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.ignore_max_compaction_bytes_for_input: true 2026-03-10T13:26:53.509 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.arena_block_size: 1048576 2026-03-10T13:26:53.509 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.soft_pending_compaction_bytes_limit: 68719476736 2026-03-10T13:26:53.509 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: 
Options.hard_pending_compaction_bytes_limit: 274877906944 2026-03-10T13:26:53.509 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.disable_auto_compactions: 0 2026-03-10T13:26:53.509 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.compaction_style: kCompactionStyleLevel 2026-03-10T13:26:53.509 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.compaction_pri: kMinOverlappingRatio 2026-03-10T13:26:53.509 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.compaction_options_universal.size_ratio: 1 2026-03-10T13:26:53.509 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.compaction_options_universal.min_merge_width: 2 2026-03-10T13:26:53.509 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295 2026-03-10T13:26:53.509 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200 2026-03-10T13:26:53.509 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1 2026-03-10T13:26:53.509 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize 2026-03-10T13:26:53.509 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824 2026-03-10T13:26:53.509 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0 2026-03-10T13:26:53.509 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0); 2026-03-10T13:26:53.509 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.inplace_update_support: 0 2026-03-10T13:26:53.509 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.inplace_update_num_locks: 10000 2026-03-10T13:26:53.509 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.memtable_prefix_bloom_size_ratio: 0.000000 2026-03-10T13:26:53.509 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.memtable_whole_key_filtering: 0 2026-03-10T13:26:53.509 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.memtable_huge_page_size: 0 2026-03-10T13:26:53.509 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.bloom_locality: 0 2026-03-10T13:26:53.510 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.max_successive_merges: 0 2026-03-10T13:26:53.510 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.optimize_filters_for_hits: 0 2026-03-10T13:26:53.510 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.paranoid_file_checks: 0 2026-03-10T13:26:53.510 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 
ceph-mon[94470]: rocksdb: Options.force_consistency_checks: 1 2026-03-10T13:26:53.510 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.report_bg_io_stats: 0 2026-03-10T13:26:53.510 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.ttl: 2592000 2026-03-10T13:26:53.510 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.periodic_compaction_seconds: 0 2026-03-10T13:26:53.510 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.preclude_last_level_data_seconds: 0 2026-03-10T13:26:53.510 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.preserve_internal_time_seconds: 0 2026-03-10T13:26:53.510 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.enable_blob_files: false 2026-03-10T13:26:53.510 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.min_blob_size: 0 2026-03-10T13:26:53.510 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.blob_file_size: 268435456 2026-03-10T13:26:53.510 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.blob_compression_type: NoCompression 2026-03-10T13:26:53.510 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.enable_blob_garbage_collection: false 2026-03-10T13:26:53.510 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.blob_garbage_collection_age_cutoff: 0.250000 2026-03-10T13:26:53.510 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000 2026-03-10T13:26:53.510 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.blob_compaction_readahead_size: 0 2026-03-10T13:26:53.510 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.blob_file_starting_level: 0 2026-03-10T13:26:53.510 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: Options.experimental_mempurge_threshold: 0.000000 2026-03-10T13:26:53.510 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: [table/block_based/block_based_table_reader.cc:721] At least one SST file opened without unique ID to verify: 48.sst 2026-03-10T13:26:53.510 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed. 
2026-03-10T13:26:53.510 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: [db/version_set.cc:5566] Recovered from manifest file:/var/lib/ceph/mon/ceph-a/store.db/MANIFEST-000015 succeeded,manifest_file_number is 15, next_file_number is 50, last_sequence is 21963, log_number is 46,prev_log_number is 0,max_column_family is 0,min_log_number_to_keep is 0 2026-03-10T13:26:53.510 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: [db/version_set.cc:5581] Column family [default] (ID 0), log number is 46 2026-03-10T13:26:53.510 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: [db/db_impl/db_impl_open.cc:539] DB ID: b77c5ea0-6a5c-45b1-a918-1ddc3f4ad619 2026-03-10T13:26:53.510 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773149213367175, "job": 1, "event": "recovery_started", "wal_files": [46]} 2026-03-10T13:26:53.510 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: [db/db_impl/db_impl_open.cc:1043] Recovering log #46 mode 2 2026-03-10T13:26:53.510 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773149213371640, "cf_name": "default", "job": 1, "event": "table_file_creation", "file_number": 51, "file_size": 774043, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 21964, "largest_seqno": 22781, "table_properties": {"data_size": 770169, "index_size": 1815, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 1029, "raw_key_size": 10343, "raw_average_key_size": 26, "raw_value_size": 762027, "raw_average_value_size": 1953, "num_data_blocks": 82, "num_entries": 390, "num_filter_entries": 390, "num_deletions": 8, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1773149213, "oldest_key_time": 0, "file_creation_time": 0, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "b77c5ea0-6a5c-45b1-a918-1ddc3f4ad619", "db_session_id": "7TEOS7B2UP6NQIQ4B1A5", "orig_file_number": 51, "seqno_to_time_mapping": "N/A"}} 2026-03-10T13:26:53.510 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773149213371706, "job": 1, "event": "recovery_finished"} 2026-03-10T13:26:53.510 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: [db/version_set.cc:5047] Creating manifest 53 2026-03-10T13:26:53.510 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed. 
2026-03-10T13:26:53.510 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-a/store.db/000046.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000 2026-03-10T13:26:53.510 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: [db/db_impl/db_impl_open.cc:1987] SstFileManager instance 0x55d913dc4e00 2026-03-10T13:26:53.510 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: DB pointer 0x55d913dd4000 2026-03-10T13:26:53.510 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS ------- 2026-03-10T13:26:53.510 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:53 vm00 ceph-mon[94470]: rocksdb: [db/db_impl/db_impl.cc:1111] 2026-03-10T13:26:53.510 INFO:journalctl@ceph.mon.a.vm00.stdout: ** DB Stats ** 2026-03-10T13:26:53.510 INFO:journalctl@ceph.mon.a.vm00.stdout: Uptime(secs): 0.0 total, 0.0 interval 2026-03-10T13:26:53.510 INFO:journalctl@ceph.mon.a.vm00.stdout: Cumulative writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 GB, 0.00 MB/s 2026-03-10T13:26:53.510 INFO:journalctl@ceph.mon.a.vm00.stdout: Cumulative WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s 2026-03-10T13:26:53.510 INFO:journalctl@ceph.mon.a.vm00.stdout: Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent 2026-03-10T13:26:53.511 INFO:journalctl@ceph.mon.a.vm00.stdout: Interval writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 MB, 0.00 MB/s 2026-03-10T13:26:53.511 INFO:journalctl@ceph.mon.a.vm00.stdout: Interval WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s 2026-03-10T13:26:53.511 INFO:journalctl@ceph.mon.a.vm00.stdout: Interval stall: 00:00:0.000 H:M:S, 0.0 percent 2026-03-10T13:26:53.511 INFO:journalctl@ceph.mon.a.vm00.stdout: 2026-03-10T13:26:53.511 INFO:journalctl@ceph.mon.a.vm00.stdout: ** Compaction Stats [default] ** 2026-03-10T13:26:53.511 INFO:journalctl@ceph.mon.a.vm00.stdout: Level Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB) 2026-03-10T13:26:53.511 INFO:journalctl@ceph.mon.a.vm00.stdout: ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ 2026-03-10T13:26:53.511 INFO:journalctl@ceph.mon.a.vm00.stdout: L0 1/0 755.90 KB 0.2 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 211.3 0.00 0.00 1 0.003 0 0 0.0 0.0 2026-03-10T13:26:53.511 INFO:journalctl@ceph.mon.a.vm00.stdout: L6 1/0 9.48 MB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.00 0.00 0 0.000 0 0 0.0 0.0 2026-03-10T13:26:53.511 INFO:journalctl@ceph.mon.a.vm00.stdout: Sum 2/0 10.22 MB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 211.3 0.00 0.00 1 0.003 0 0 0.0 0.0 2026-03-10T13:26:53.511 INFO:journalctl@ceph.mon.a.vm00.stdout: Int 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 211.3 0.00 0.00 1 0.003 0 0 0.0 0.0 2026-03-10T13:26:53.511 INFO:journalctl@ceph.mon.a.vm00.stdout: 2026-03-10T13:26:53.511 INFO:journalctl@ceph.mon.a.vm00.stdout: ** Compaction Stats [default] ** 2026-03-10T13:26:53.511 INFO:journalctl@ceph.mon.a.vm00.stdout: Priority Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) 
Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB) 2026-03-10T13:26:53.511 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:26:53 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: [10/Mar/2026:13:26:53] ENGINE HTTP Server cherrypy._cpwsgi_server.CPWSGIServer(('::', 9283)) shut down 2026-03-10T13:26:53.511 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:26:53 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: [10/Mar/2026:13:26:53] ENGINE Bus STOPPED 2026-03-10T13:26:53.511 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:26:53 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: [10/Mar/2026:13:26:53] ENGINE Bus STARTING 2026-03-10T13:26:53.511 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:26:53 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: [10/Mar/2026:13:26:53] ENGINE Serving on http://:::9283 2026-03-10T13:26:53.511 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:26:53 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: [10/Mar/2026:13:26:53] ENGINE Bus STARTED 2026-03-10T13:26:53.511 INFO:journalctl@ceph.mon.a.vm00.stdout: --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 2026-03-10T13:26:53.511 INFO:journalctl@ceph.mon.a.vm00.stdout: User 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 211.3 0.00 0.00 1 0.003 0 0 0.0 0.0 2026-03-10T13:26:53.511 INFO:journalctl@ceph.mon.a.vm00.stdout: 2026-03-10T13:26:53.511 INFO:journalctl@ceph.mon.a.vm00.stdout: Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0 2026-03-10T13:26:53.511 INFO:journalctl@ceph.mon.a.vm00.stdout: 2026-03-10T13:26:53.511 INFO:journalctl@ceph.mon.a.vm00.stdout: Uptime(secs): 0.0 total, 0.0 interval 2026-03-10T13:26:53.511 INFO:journalctl@ceph.mon.a.vm00.stdout: Flush(GB): cumulative 0.001, interval 0.001 2026-03-10T13:26:53.511 INFO:journalctl@ceph.mon.a.vm00.stdout: AddFile(GB): cumulative 0.000, interval 0.000 2026-03-10T13:26:53.511 INFO:journalctl@ceph.mon.a.vm00.stdout: AddFile(Total Files): cumulative 0, interval 0 2026-03-10T13:26:53.511 INFO:journalctl@ceph.mon.a.vm00.stdout: AddFile(L0 Files): cumulative 0, interval 0 2026-03-10T13:26:53.511 INFO:journalctl@ceph.mon.a.vm00.stdout: AddFile(Keys): cumulative 0, interval 0 2026-03-10T13:26:53.511 INFO:journalctl@ceph.mon.a.vm00.stdout: Cumulative compaction: 0.00 GB write, 93.19 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds 2026-03-10T13:26:53.511 INFO:journalctl@ceph.mon.a.vm00.stdout: Interval compaction: 0.00 GB write, 93.19 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds 2026-03-10T13:26:53.511 INFO:journalctl@ceph.mon.a.vm00.stdout: Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count 2026-03-10T13:26:53.511 INFO:journalctl@ceph.mon.a.vm00.stdout: Block cache BinnedLRUCache@0x55d913dc29b0#2 capacity: 512.00 MB usage: 3.23 KB table_size: 0 occupancy: 18446744073709551615 collections: 1 last_copies: 0 last_secs: 1e-05 secs_since: 0 2026-03-10T13:26:53.512 INFO:journalctl@ceph.mon.a.vm00.stdout: Block cache entry stats(count,size,portion): FilterBlock(1,1.16 KB,0.000220537%) IndexBlock(1,2.08 KB,0.000396371%) Misc(1,0.00 KB,0%) 2026-03-10T13:26:53.512 
INFO:journalctl@ceph.mon.a.vm00.stdout: 2026-03-10T13:26:53.512 INFO:journalctl@ceph.mon.a.vm00.stdout: ** File Read Latency Histogram By Level [default] ** 2026-03-10T13:26:54.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:54 vm00 ceph-mon[94470]: Deploying daemon mon.a on vm00 2026-03-10T13:26:54.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:54 vm00 ceph-mon[94470]: from='client.25234 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:26:54.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:54 vm00 ceph-mon[94470]: pgmap v39: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:26:54.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:54 vm00 ceph-mon[94470]: from='client.34154 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:26:54.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:54 vm00 ceph-mon[94470]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T13:26:54.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:54 vm00 ceph-mon[94470]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T13:26:54.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:54 vm00 ceph-mon[94470]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T13:26:54.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:54 vm00 ceph-mon[94470]: mon.a calling monitor election 2026-03-10T13:26:54.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:54 vm00 ceph-mon[94470]: mon.a is new leader, mons a,c,b in quorum (ranks 0,1,2) 2026-03-10T13:26:54.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:54 vm00 ceph-mon[94470]: monmap epoch 3 2026-03-10T13:26:54.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:54 vm00 ceph-mon[94470]: fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 2026-03-10T13:26:54.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:54 vm00 ceph-mon[94470]: last_changed 2026-03-10T13:05:10.450629+0000 2026-03-10T13:26:54.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:54 vm00 ceph-mon[94470]: created 2026-03-10T13:04:24.045109+0000 2026-03-10T13:26:54.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:54 vm00 ceph-mon[94470]: min_mon_release 17 (quincy) 2026-03-10T13:26:54.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:54 vm00 ceph-mon[94470]: election_strategy: 1 2026-03-10T13:26:54.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:54 vm00 ceph-mon[94470]: 0: [v2:192.168.123.100:3300/0,v1:192.168.123.100:6789/0] mon.a 2026-03-10T13:26:54.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:54 vm00 ceph-mon[94470]: 1: [v2:192.168.123.100:3301/0,v1:192.168.123.100:6790/0] mon.c 2026-03-10T13:26:54.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:54 vm00 ceph-mon[94470]: 2: [v2:192.168.123.108:3300/0,v1:192.168.123.108:6789/0] mon.b 2026-03-10T13:26:54.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:54 vm00 ceph-mon[94470]: fsmap 2026-03-10T13:26:54.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:54 vm00 ceph-mon[94470]: osdmap e94: 8 total, 8 up, 8 in 2026-03-10T13:26:54.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:54 vm00 ceph-mon[94470]: mgrmap e39: y(active, since 74s), standbys: x 
2026-03-10T13:26:54.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:54 vm00 ceph-mon[94470]: overall HEALTH_OK 2026-03-10T13:26:54.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:54 vm00 ceph-mon[94470]: from='mgr.24955 ' entity='' 2026-03-10T13:26:54.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:54 vm00 ceph-mon[94470]: mgrmap e40: y(active, since 74s), standbys: x 2026-03-10T13:26:54.253 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:26:54 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: ignoring --setuser ceph since I am not root 2026-03-10T13:26:54.253 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:26:54 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: ignoring --setgroup ceph since I am not root 2026-03-10T13:26:54.253 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:26:54 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:26:54.144+0000 7f14799f1140 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member 2026-03-10T13:26:54.253 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:26:54 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:26:54.188+0000 7f14799f1140 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member 2026-03-10T13:26:54.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:54 vm00 ceph-mon[51670]: Deploying daemon mon.a on vm00 2026-03-10T13:26:54.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:54 vm00 ceph-mon[51670]: from='client.25234 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:26:54.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:54 vm00 ceph-mon[51670]: pgmap v39: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:26:54.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:54 vm00 ceph-mon[51670]: from='client.34154 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:26:54.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:54 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T13:26:54.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:54 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T13:26:54.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:54 vm00 ceph-mon[51670]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T13:26:54.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:54 vm00 ceph-mon[51670]: mon.a calling monitor election 2026-03-10T13:26:54.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:54 vm00 ceph-mon[51670]: mon.a is new leader, mons a,c,b in quorum (ranks 0,1,2) 2026-03-10T13:26:54.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:54 vm00 ceph-mon[51670]: monmap epoch 3 2026-03-10T13:26:54.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:54 vm00 ceph-mon[51670]: fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 2026-03-10T13:26:54.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:54 vm00 ceph-mon[51670]: last_changed 2026-03-10T13:05:10.450629+0000 2026-03-10T13:26:54.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:54 vm00 ceph-mon[51670]: created 2026-03-10T13:04:24.045109+0000 2026-03-10T13:26:54.253 
INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:54 vm00 ceph-mon[51670]: min_mon_release 17 (quincy) 2026-03-10T13:26:54.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:54 vm00 ceph-mon[51670]: election_strategy: 1 2026-03-10T13:26:54.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:54 vm00 ceph-mon[51670]: 0: [v2:192.168.123.100:3300/0,v1:192.168.123.100:6789/0] mon.a 2026-03-10T13:26:54.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:54 vm00 ceph-mon[51670]: 1: [v2:192.168.123.100:3301/0,v1:192.168.123.100:6790/0] mon.c 2026-03-10T13:26:54.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:54 vm00 ceph-mon[51670]: 2: [v2:192.168.123.108:3300/0,v1:192.168.123.108:6789/0] mon.b 2026-03-10T13:26:54.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:54 vm00 ceph-mon[51670]: fsmap 2026-03-10T13:26:54.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:54 vm00 ceph-mon[51670]: osdmap e94: 8 total, 8 up, 8 in 2026-03-10T13:26:54.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:54 vm00 ceph-mon[51670]: mgrmap e39: y(active, since 74s), standbys: x 2026-03-10T13:26:54.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:54 vm00 ceph-mon[51670]: overall HEALTH_OK 2026-03-10T13:26:54.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:54 vm00 ceph-mon[51670]: from='mgr.24955 ' entity='' 2026-03-10T13:26:54.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:54 vm00 ceph-mon[51670]: mgrmap e40: y(active, since 74s), standbys: x 2026-03-10T13:26:54.520 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:26:54 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[79709]: ignoring --setuser ceph since I am not root 2026-03-10T13:26:54.520 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:26:54 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[79709]: ignoring --setgroup ceph since I am not root 2026-03-10T13:26:54.521 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:26:54 vm08 ceph-mgr[79713]: -- 192.168.123.108:0/4150416301 <== mon.2 v2:192.168.123.108:3300/0 4 ==== auth_reply(proto 2 0 (0) Success) ==== 194+0+0 (secure 0 0 0) 0x55e962e674a0 con 0x55e962e44800 2026-03-10T13:26:54.521 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:26:54 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[79709]: 2026-03-10T13:26:54.139+0000 7f469b944140 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member 2026-03-10T13:26:54.521 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:26:54 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[79709]: 2026-03-10T13:26:54.185+0000 7f469b944140 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member 2026-03-10T13:26:54.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:54 vm08 ceph-mon[82639]: Deploying daemon mon.a on vm00 2026-03-10T13:26:54.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:54 vm08 ceph-mon[82639]: from='client.25234 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:26:54.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:54 vm08 ceph-mon[82639]: pgmap v39: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:26:54.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:54 vm08 ceph-mon[82639]: from='client.34154 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:26:54.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:54 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' 
entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T13:26:54.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:54 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T13:26:54.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:54 vm08 ceph-mon[82639]: from='mgr.24955 192.168.123.100:0/763703355' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T13:26:54.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:54 vm08 ceph-mon[82639]: mon.a calling monitor election 2026-03-10T13:26:54.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:54 vm08 ceph-mon[82639]: mon.a is new leader, mons a,c,b in quorum (ranks 0,1,2) 2026-03-10T13:26:54.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:54 vm08 ceph-mon[82639]: monmap epoch 3 2026-03-10T13:26:54.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:54 vm08 ceph-mon[82639]: fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 2026-03-10T13:26:54.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:54 vm08 ceph-mon[82639]: last_changed 2026-03-10T13:05:10.450629+0000 2026-03-10T13:26:54.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:54 vm08 ceph-mon[82639]: created 2026-03-10T13:04:24.045109+0000 2026-03-10T13:26:54.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:54 vm08 ceph-mon[82639]: min_mon_release 17 (quincy) 2026-03-10T13:26:54.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:54 vm08 ceph-mon[82639]: election_strategy: 1 2026-03-10T13:26:54.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:54 vm08 ceph-mon[82639]: 0: [v2:192.168.123.100:3300/0,v1:192.168.123.100:6789/0] mon.a 2026-03-10T13:26:54.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:54 vm08 ceph-mon[82639]: 1: [v2:192.168.123.100:3301/0,v1:192.168.123.100:6790/0] mon.c 2026-03-10T13:26:54.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:54 vm08 ceph-mon[82639]: 2: [v2:192.168.123.108:3300/0,v1:192.168.123.108:6789/0] mon.b 2026-03-10T13:26:54.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:54 vm08 ceph-mon[82639]: fsmap 2026-03-10T13:26:54.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:54 vm08 ceph-mon[82639]: osdmap e94: 8 total, 8 up, 8 in 2026-03-10T13:26:54.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:54 vm08 ceph-mon[82639]: mgrmap e39: y(active, since 74s), standbys: x 2026-03-10T13:26:54.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:54 vm08 ceph-mon[82639]: overall HEALTH_OK 2026-03-10T13:26:54.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:54 vm08 ceph-mon[82639]: from='mgr.24955 ' entity='' 2026-03-10T13:26:54.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:54 vm08 ceph-mon[82639]: mgrmap e40: y(active, since 74s), standbys: x 2026-03-10T13:26:54.927 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:26:54 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[79709]: 2026-03-10T13:26:54.603+0000 7f469b944140 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-10T13:26:54.928 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:26:54 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:26:54.605+0000 7f14799f1140 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-10T13:26:55.252 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:26:54 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:26:54.927+0000 7f14799f1140 -1 mgr[py] Module devicehealth has 
missing NOTIFY_TYPES member 2026-03-10T13:26:55.252 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:26:55 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: /lib64/python3.9/site-packages/scipy/__init__.py:73: UserWarning: NumPy was imported from a Python sub-interpreter but NumPy does not properly support sub-interpreters. This will likely work for most users but might cause hard to track down issues or subtle bugs. A common user of the rare sub-interpreter feature is wsgi which also allows single-interpreter mode. 2026-03-10T13:26:55.252 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:26:55 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: Improvements in the case of bugs are welcome, but is not on the NumPy roadmap, and full support may require significant effort to achieve. 2026-03-10T13:26:55.252 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:26:55 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: from numpy import show_config as show_numpy_config 2026-03-10T13:26:55.252 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:26:55 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:26:55.043+0000 7f14799f1140 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member 2026-03-10T13:26:55.252 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:26:55 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:26:55.085+0000 7f14799f1140 -1 mgr[py] Module influx has missing NOTIFY_TYPES member 2026-03-10T13:26:55.252 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:26:55 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:26:55.157+0000 7f14799f1140 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member 2026-03-10T13:26:55.270 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:26:54 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[79709]: 2026-03-10T13:26:54.926+0000 7f469b944140 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member 2026-03-10T13:26:55.270 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:26:55 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[79709]: /lib64/python3.9/site-packages/scipy/__init__.py:73: UserWarning: NumPy was imported from a Python sub-interpreter but NumPy does not properly support sub-interpreters. This will likely work for most users but might cause hard to track down issues or subtle bugs. A common user of the rare sub-interpreter feature is wsgi which also allows single-interpreter mode. 2026-03-10T13:26:55.270 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:26:55 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[79709]: Improvements in the case of bugs are welcome, but is not on the NumPy roadmap, and full support may require significant effort to achieve. 
2026-03-10T13:26:55.270 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:26:55 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[79709]: from numpy import show_config as show_numpy_config 2026-03-10T13:26:55.270 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:26:55 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[79709]: 2026-03-10T13:26:55.025+0000 7f469b944140 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member 2026-03-10T13:26:55.270 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:26:55 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[79709]: 2026-03-10T13:26:55.068+0000 7f469b944140 -1 mgr[py] Module influx has missing NOTIFY_TYPES member 2026-03-10T13:26:55.270 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:26:55 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[79709]: 2026-03-10T13:26:55.140+0000 7f469b944140 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member 2026-03-10T13:26:55.907 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:26:55 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[79709]: 2026-03-10T13:26:55.654+0000 7f469b944140 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-10T13:26:55.907 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:26:55 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[79709]: 2026-03-10T13:26:55.784+0000 7f469b944140 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-10T13:26:55.907 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:26:55 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[79709]: 2026-03-10T13:26:55.828+0000 7f469b944140 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 2026-03-10T13:26:55.907 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:26:55 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[79709]: 2026-03-10T13:26:55.864+0000 7f469b944140 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-10T13:26:55.976 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:26:55 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:26:55.695+0000 7f14799f1140 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-10T13:26:55.976 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:26:55 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:26:55.818+0000 7f14799f1140 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-10T13:26:55.976 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:26:55 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:26:55.860+0000 7f14799f1140 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 2026-03-10T13:26:55.976 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:26:55 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:26:55.895+0000 7f14799f1140 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-10T13:26:55.976 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:26:55 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:26:55.936+0000 7f14799f1140 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 2026-03-10T13:26:55.977 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:26:55 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:26:55.974+0000 7f14799f1140 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-10T13:26:56.172 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:26:55 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[79709]: 2026-03-10T13:26:55.906+0000 7f469b944140 -1 mgr[py] Module 
pg_autoscaler has missing NOTIFY_TYPES member 2026-03-10T13:26:56.172 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:26:55 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[79709]: 2026-03-10T13:26:55.943+0000 7f469b944140 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-10T13:26:56.172 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:26:56 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[79709]: 2026-03-10T13:26:56.120+0000 7f469b944140 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member 2026-03-10T13:26:56.172 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:56 vm08 ceph-mon[82639]: mgrmap e41: y(active, since 75s), standbys: x 2026-03-10T13:26:56.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:56 vm00 ceph-mon[94470]: mgrmap e41: y(active, since 75s), standbys: x 2026-03-10T13:26:56.252 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:26:56 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:26:56.150+0000 7f14799f1140 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member 2026-03-10T13:26:56.252 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:26:56 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:26:56.201+0000 7f14799f1140 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-10T13:26:56.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:56 vm00 ceph-mon[51670]: mgrmap e41: y(active, since 75s), standbys: x 2026-03-10T13:26:56.520 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:26:56 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[79709]: 2026-03-10T13:26:56.171+0000 7f469b944140 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-10T13:26:56.520 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:26:56 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[79709]: 2026-03-10T13:26:56.389+0000 7f469b944140 -1 mgr[py] Module rgw has missing NOTIFY_TYPES member 2026-03-10T13:26:56.695 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:26:56 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:26:56.419+0000 7f14799f1140 -1 mgr[py] Module rgw has missing NOTIFY_TYPES member 2026-03-10T13:26:56.942 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:26:56 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[79709]: 2026-03-10T13:26:56.664+0000 7f469b944140 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-10T13:26:56.942 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:26:56 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[79709]: 2026-03-10T13:26:56.703+0000 7f469b944140 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member 2026-03-10T13:26:56.942 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:26:56 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[79709]: 2026-03-10T13:26:56.745+0000 7f469b944140 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-10T13:26:56.942 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:26:56 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[79709]: 2026-03-10T13:26:56.825+0000 7f469b944140 -1 mgr[py] Module status has missing NOTIFY_TYPES member 2026-03-10T13:26:56.942 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:26:56 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[79709]: 2026-03-10T13:26:56.861+0000 7f469b944140 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 2026-03-10T13:26:56.971 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:26:56 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:26:56.693+0000 
7f14799f1140 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-10T13:26:56.971 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:26:56 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:26:56.730+0000 7f14799f1140 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member 2026-03-10T13:26:56.971 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:26:56 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:26:56.773+0000 7f14799f1140 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-10T13:26:56.971 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:26:56 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:26:56.851+0000 7f14799f1140 -1 mgr[py] Module status has missing NOTIFY_TYPES member 2026-03-10T13:26:56.971 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:26:56 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:26:56.888+0000 7f14799f1140 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 2026-03-10T13:26:57.193 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:26:56 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[79709]: 2026-03-10T13:26:56.941+0000 7f469b944140 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member 2026-03-10T13:26:57.193 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:26:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[79709]: 2026-03-10T13:26:57.056+0000 7f469b944140 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member 2026-03-10T13:26:57.223 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:26:56 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:26:56.969+0000 7f14799f1140 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member 2026-03-10T13:26:57.223 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:26:57 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:26:57.085+0000 7f14799f1140 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member 2026-03-10T13:26:57.223 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:26:56 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:26:56.987Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:26:57.223 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:26:56 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:26:56.989Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:26:57.289 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:26:57 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:26:57.221+0000 7f14799f1140 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-10T13:26:57.289 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:26:57 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:26:57.260+0000 7f14799f1140 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-10T13:26:57.289 
INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:57 vm00 ceph-mon[94470]: Standby manager daemon x restarted 2026-03-10T13:26:57.289 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:57 vm00 ceph-mon[94470]: Standby manager daemon x started 2026-03-10T13:26:57.289 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:57 vm00 ceph-mon[94470]: from='mgr.? 192.168.123.108:0/726282665' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-10T13:26:57.289 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:57 vm00 ceph-mon[94470]: from='mgr.? 192.168.123.108:0/726282665' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-10T13:26:57.289 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:57 vm00 ceph-mon[94470]: from='mgr.? 192.168.123.108:0/726282665' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-10T13:26:57.289 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:57 vm00 ceph-mon[94470]: from='mgr.? 192.168.123.108:0/726282665' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-10T13:26:57.289 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:57 vm00 ceph-mon[94470]: Active manager daemon y restarted 2026-03-10T13:26:57.289 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:57 vm00 ceph-mon[94470]: Activating manager daemon y 2026-03-10T13:26:57.289 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:57 vm00 ceph-mon[94470]: osdmap e95: 8 total, 8 up, 8 in 2026-03-10T13:26:57.289 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:57 vm00 ceph-mon[94470]: mgrmap e42: y(active, starting, since 0.00650509s), standbys: x 2026-03-10T13:26:57.289 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:57 vm00 ceph-mon[51670]: Standby manager daemon x restarted 2026-03-10T13:26:57.289 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:57 vm00 ceph-mon[51670]: Standby manager daemon x started 2026-03-10T13:26:57.289 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:57 vm00 ceph-mon[51670]: from='mgr.? 192.168.123.108:0/726282665' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-10T13:26:57.289 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:57 vm00 ceph-mon[51670]: from='mgr.? 192.168.123.108:0/726282665' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-10T13:26:57.289 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:57 vm00 ceph-mon[51670]: from='mgr.? 192.168.123.108:0/726282665' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-10T13:26:57.289 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:57 vm00 ceph-mon[51670]: from='mgr.? 
192.168.123.108:0/726282665' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-10T13:26:57.289 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:57 vm00 ceph-mon[51670]: Active manager daemon y restarted 2026-03-10T13:26:57.289 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:57 vm00 ceph-mon[51670]: Activating manager daemon y 2026-03-10T13:26:57.289 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:57 vm00 ceph-mon[51670]: osdmap e95: 8 total, 8 up, 8 in 2026-03-10T13:26:57.502 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:26:57 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: [10/Mar/2026:13:26:57] ENGINE Bus STARTING 2026-03-10T13:26:57.503 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:26:57 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: CherryPy Checker: 2026-03-10T13:26:57.503 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:26:57 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: The Application mounted at '' has an empty config. 2026-03-10T13:26:57.503 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:26:57 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:26:57.503 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:26:57 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: [10/Mar/2026:13:26:57] ENGINE Serving on http://:::9283 2026-03-10T13:26:57.503 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:26:57 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: [10/Mar/2026:13:26:57] ENGINE Bus STARTED 2026-03-10T13:26:57.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:57 vm00 ceph-mon[51670]: mgrmap e42: y(active, starting, since 0.00650509s), standbys: x 2026-03-10T13:26:57.520 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:26:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[79709]: 2026-03-10T13:26:57.192+0000 7f469b944140 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-10T13:26:57.520 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:26:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[79709]: 2026-03-10T13:26:57.229+0000 7f469b944140 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-10T13:26:57.520 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:26:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[79709]: [10/Mar/2026:13:26:57] ENGINE Bus STARTING 2026-03-10T13:26:57.520 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:26:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[79709]: CherryPy Checker: 2026-03-10T13:26:57.520 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:26:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[79709]: The Application mounted at '' has an empty config. 
2026-03-10T13:26:57.520 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:26:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[79709]: 2026-03-10T13:26:57.520 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:26:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[79709]: [10/Mar/2026:13:26:57] ENGINE Serving on http://:::9283 2026-03-10T13:26:57.520 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:26:57 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x[79709]: [10/Mar/2026:13:26:57] ENGINE Bus STARTED 2026-03-10T13:26:57.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:57 vm08 ceph-mon[82639]: Standby manager daemon x restarted 2026-03-10T13:26:57.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:57 vm08 ceph-mon[82639]: Standby manager daemon x started 2026-03-10T13:26:57.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:57 vm08 ceph-mon[82639]: from='mgr.? 192.168.123.108:0/726282665' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-10T13:26:57.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:57 vm08 ceph-mon[82639]: from='mgr.? 192.168.123.108:0/726282665' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-10T13:26:57.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:57 vm08 ceph-mon[82639]: from='mgr.? 192.168.123.108:0/726282665' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-10T13:26:57.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:57 vm08 ceph-mon[82639]: from='mgr.? 192.168.123.108:0/726282665' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-10T13:26:57.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:57 vm08 ceph-mon[82639]: Active manager daemon y restarted 2026-03-10T13:26:57.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:57 vm08 ceph-mon[82639]: Activating manager daemon y 2026-03-10T13:26:57.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:57 vm08 ceph-mon[82639]: osdmap e95: 8 total, 8 up, 8 in 2026-03-10T13:26:57.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:57 vm08 ceph-mon[82639]: mgrmap e42: y(active, starting, since 0.00650509s), standbys: x 2026-03-10T13:26:58.479 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:58 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T13:26:58.479 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:58 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T13:26:58.479 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:58 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T13:26:58.479 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:58 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-10T13:26:58.479 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:58 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-10T13:26:58.479 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:58 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' 
cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T13:26:58.479 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:58 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T13:26:58.479 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:58 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T13:26:58.480 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:58 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T13:26:58.480 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:58 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T13:26:58.480 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:58 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T13:26:58.480 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:58 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T13:26:58.480 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:58 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T13:26:58.480 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:58 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-10T13:26:58.480 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:58 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-10T13:26:58.480 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:58 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-10T13:26:58.480 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:58 vm08 ceph-mon[82639]: Manager daemon y is now available 2026-03-10T13:26:58.480 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:58 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:26:58.480 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:58 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T13:26:58.480 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:58 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T13:26:58.480 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:58 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T13:26:58.480 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:58 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T13:26:58.610 
INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:58 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T13:26:58.610 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:58 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T13:26:58.610 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:58 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T13:26:58.610 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:58 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-10T13:26:58.610 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:58 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-10T13:26:58.610 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:58 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T13:26:58.610 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:58 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T13:26:58.610 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:58 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T13:26:58.610 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:58 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T13:26:58.610 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:58 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T13:26:58.611 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:58 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T13:26:58.611 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:58 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T13:26:58.611 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:58 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T13:26:58.611 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:58 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-10T13:26:58.611 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:58 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-10T13:26:58.611 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:58 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-10T13:26:58.611 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:58 vm00 ceph-mon[94470]: Manager daemon y is now available 
2026-03-10T13:26:58.611 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:58 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:26:58.611 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:58 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T13:26:58.611 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:58 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T13:26:58.611 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:58 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T13:26:58.611 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:58 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T13:26:58.611 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:26:58 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:26:58.305+0000 7f1445d54640 -1 mgr.server handle_report got status from non-daemon mon.a 2026-03-10T13:26:58.611 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:58 vm00 ceph-mon[51670]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T13:26:58.611 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:58 vm00 ceph-mon[51670]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T13:26:58.611 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:58 vm00 ceph-mon[51670]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T13:26:58.611 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:58 vm00 ceph-mon[51670]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-10T13:26:58.611 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:58 vm00 ceph-mon[51670]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-10T13:26:58.611 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:58 vm00 ceph-mon[51670]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T13:26:58.611 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:58 vm00 ceph-mon[51670]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T13:26:58.611 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:58 vm00 ceph-mon[51670]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T13:26:58.611 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:58 vm00 ceph-mon[51670]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T13:26:58.611 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:58 vm00 ceph-mon[51670]: from='mgr.44106 
192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T13:26:58.611 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:58 vm00 ceph-mon[51670]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T13:26:58.611 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:58 vm00 ceph-mon[51670]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T13:26:58.611 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:58 vm00 ceph-mon[51670]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T13:26:58.611 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:58 vm00 ceph-mon[51670]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-10T13:26:58.611 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:58 vm00 ceph-mon[51670]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-10T13:26:58.611 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:58 vm00 ceph-mon[51670]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-10T13:26:58.611 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:58 vm00 ceph-mon[51670]: Manager daemon y is now available 2026-03-10T13:26:58.611 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:58 vm00 ceph-mon[51670]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:26:58.611 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:58 vm00 ceph-mon[51670]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T13:26:58.611 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:58 vm00 ceph-mon[51670]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T13:26:58.611 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:58 vm00 ceph-mon[51670]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T13:26:58.611 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:58 vm00 ceph-mon[51670]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T13:26:59.588 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:59 vm08 ceph-mon[82639]: [10/Mar/2026:13:26:58] ENGINE Bus STARTING 2026-03-10T13:26:59.588 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:59 vm08 ceph-mon[82639]: [10/Mar/2026:13:26:58] ENGINE Serving on http://192.168.123.100:8765 2026-03-10T13:26:59.588 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:59 vm08 ceph-mon[82639]: mgrmap e43: y(active, since 1.05082s), standbys: x 2026-03-10T13:26:59.588 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:59 vm08 ceph-mon[82639]: pgmap v3: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:26:59.588 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:59 vm08 ceph-mon[82639]: [10/Mar/2026:13:26:58] ENGINE Serving on https://192.168.123.100:7150 
2026-03-10T13:26:59.588 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:59 vm08 ceph-mon[82639]: [10/Mar/2026:13:26:58] ENGINE Bus STARTED 2026-03-10T13:26:59.588 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:59 vm08 ceph-mon[82639]: [10/Mar/2026:13:26:58] ENGINE Client ('192.168.123.100', 53206) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-10T13:26:59.588 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:59 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:26:59.588 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:59 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:26:59.588 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:59 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:26:59.588 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:26:59 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:26:59.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:59 vm00 ceph-mon[94470]: [10/Mar/2026:13:26:58] ENGINE Bus STARTING 2026-03-10T13:26:59.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:59 vm00 ceph-mon[94470]: [10/Mar/2026:13:26:58] ENGINE Serving on http://192.168.123.100:8765 2026-03-10T13:26:59.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:59 vm00 ceph-mon[94470]: mgrmap e43: y(active, since 1.05082s), standbys: x 2026-03-10T13:26:59.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:59 vm00 ceph-mon[94470]: pgmap v3: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:26:59.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:59 vm00 ceph-mon[94470]: [10/Mar/2026:13:26:58] ENGINE Serving on https://192.168.123.100:7150 2026-03-10T13:26:59.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:59 vm00 ceph-mon[94470]: [10/Mar/2026:13:26:58] ENGINE Bus STARTED 2026-03-10T13:26:59.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:59 vm00 ceph-mon[94470]: [10/Mar/2026:13:26:58] ENGINE Client ('192.168.123.100', 53206) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-10T13:26:59.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:59 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:26:59.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:59 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:26:59.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:59 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:26:59.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:26:59 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:26:59.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:59 vm00 ceph-mon[51670]: [10/Mar/2026:13:26:58] ENGINE Bus STARTING 2026-03-10T13:26:59.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:59 vm00 ceph-mon[51670]: [10/Mar/2026:13:26:58] ENGINE Serving on http://192.168.123.100:8765 2026-03-10T13:26:59.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:59 vm00 ceph-mon[51670]: mgrmap e43: y(active, since 1.05082s), standbys: x 2026-03-10T13:26:59.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:59 vm00 ceph-mon[51670]: pgmap v3: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:26:59.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:59 vm00 ceph-mon[51670]: [10/Mar/2026:13:26:58] 
ENGINE Serving on https://192.168.123.100:7150 2026-03-10T13:26:59.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:59 vm00 ceph-mon[51670]: [10/Mar/2026:13:26:58] ENGINE Bus STARTED 2026-03-10T13:26:59.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:59 vm00 ceph-mon[51670]: [10/Mar/2026:13:26:58] ENGINE Client ('192.168.123.100', 53206) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-10T13:26:59.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:59 vm00 ceph-mon[51670]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:26:59.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:59 vm00 ceph-mon[51670]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:26:59.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:59 vm00 ceph-mon[51670]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:26:59.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:26:59 vm00 ceph-mon[51670]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:00.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:00 vm00 ceph-mon[94470]: pgmap v4: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:27:00.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:00 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:00.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:00 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:00.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:00 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:00.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:00 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:00.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:00 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:27:00.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:00 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:27:00.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:00 vm00 ceph-mon[94470]: mgrmap e44: y(active, since 2s), standbys: x 2026-03-10T13:27:00.754 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:00 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:00.754 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:00 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:00.754 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:00 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm08", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:27:00.754 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:00 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm08", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:27:00.754 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:00 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:27:00.754 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:00 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": 
"client.admin"}]: dispatch 2026-03-10T13:27:00.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:00 vm00 ceph-mon[51670]: pgmap v4: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:27:00.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:00 vm00 ceph-mon[51670]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:00.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:00 vm00 ceph-mon[51670]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:00.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:00 vm00 ceph-mon[51670]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:00.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:00 vm00 ceph-mon[51670]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:00.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:00 vm00 ceph-mon[51670]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:27:00.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:00 vm00 ceph-mon[51670]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:27:00.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:00 vm00 ceph-mon[51670]: mgrmap e44: y(active, since 2s), standbys: x 2026-03-10T13:27:00.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:00 vm00 ceph-mon[51670]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:00.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:00 vm00 ceph-mon[51670]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:00.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:00 vm00 ceph-mon[51670]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm08", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:27:00.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:00 vm00 ceph-mon[51670]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm08", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:27:00.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:00 vm00 ceph-mon[51670]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:27:00.754 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:00 vm00 ceph-mon[51670]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:27:00.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:00 vm08 ceph-mon[82639]: pgmap v4: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:27:00.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:00 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:00.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:00 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:00.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:00 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:00.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:00 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:00.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:00 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: 
dispatch 2026-03-10T13:27:00.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:00 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:27:00.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:00 vm08 ceph-mon[82639]: mgrmap e44: y(active, since 2s), standbys: x 2026-03-10T13:27:00.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:00 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:00.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:00 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:00.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:00 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm08", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:27:00.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:00 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm08", "name": "osd_memory_target"}]: dispatch 2026-03-10T13:27:00.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:00 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:27:00.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:00 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:27:01.580 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:01 vm00 systemd[1]: Stopping Ceph mon.c for 98a3dada-1c81-11f1-89c9-d57c120f78d5... 2026-03-10T13:27:01.927 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:27:01 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: [10/Mar/2026:13:27:01] ENGINE Bus STOPPING 2026-03-10T13:27:01.927 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:01 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mon-c[51666]: 2026-03-10T13:27:01.577+0000 7f218596e700 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-mon -n mon.c -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false (PID: 1) UID: 0 2026-03-10T13:27:01.927 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:01 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mon-c[51666]: 2026-03-10T13:27:01.577+0000 7f218596e700 -1 mon.c@1(peon) e3 *** Got Signal Terminated *** 2026-03-10T13:27:01.927 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:01 vm00 podman[96160]: 2026-03-10 13:27:01.678487132 +0000 UTC m=+0.113657186 container died d00b7fd44c230f2968fc2afddc8160396de28998206851a16117ed1e8f898dbb (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mon-c, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. 
This image takes the Red Hat UBI and layers on content from CentOS Stream, name=centos-stream, ceph=True, vendor=Red Hat, Inc., distribution-scope=public, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, GIT_BRANCH=HEAD, architecture=x86_64, vcs-type=git, io.k8s.display-name=CentOS Stream 8, io.openshift.expose-services=, maintainer=Guillaume Abrioux , release=754, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, version=8, RELEASE=HEAD, io.openshift.tags=base centos centos-stream, GIT_CLEAN=True, build-date=2022-05-03T08:36:31.336870, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, com.redhat.component=centos-stream-container, CEPH_POINT_RELEASE=-17.2.0, GIT_REPO=https://github.com/ceph/ceph-container.git, io.buildah.version=1.19.8, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly.) 2026-03-10T13:27:01.927 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:01 vm00 podman[96160]: 2026-03-10 13:27:01.695040429 +0000 UTC m=+0.130210483 container remove d00b7fd44c230f2968fc2afddc8160396de28998206851a16117ed1e8f898dbb (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mon-c, version=8, GIT_BRANCH=HEAD, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, RELEASE=HEAD, vcs-type=git, GIT_CLEAN=True, com.redhat.component=centos-stream-container, build-date=2022-05-03T08:36:31.336870, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, GIT_REPO=https://github.com/ceph/ceph-container.git, CEPH_POINT_RELEASE=-17.2.0, io.buildah.version=1.19.8, maintainer=Guillaume Abrioux , release=754, io.k8s.display-name=CentOS Stream 8, name=centos-stream, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, vendor=Red Hat, Inc., architecture=x86_64, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., distribution-scope=public, io.openshift.expose-services=, io.openshift.tags=base centos centos-stream, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, ceph=True, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com) 2026-03-10T13:27:01.927 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:01 vm00 bash[96160]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mon-c 2026-03-10T13:27:01.927 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:01 vm00 systemd[1]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@mon.c.service: Deactivated successfully. 2026-03-10T13:27:01.927 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:01 vm00 systemd[1]: Stopped Ceph mon.c for 98a3dada-1c81-11f1-89c9-d57c120f78d5. 
2026-03-10T13:27:01.927 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:01 vm00 systemd[1]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@mon.c.service: Consumed 9.688s CPU time. 2026-03-10T13:27:02.252 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:27:01 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: [10/Mar/2026:13:27:01] ENGINE HTTP Server cherrypy._cpwsgi_server.CPWSGIServer(('::', 9283)) shut down 2026-03-10T13:27:02.252 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:27:01 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: [10/Mar/2026:13:27:01] ENGINE Bus STOPPED 2026-03-10T13:27:02.252 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:27:01 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: [10/Mar/2026:13:27:01] ENGINE Bus STARTING 2026-03-10T13:27:02.252 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:27:02 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: [10/Mar/2026:13:27:02] ENGINE Serving on http://:::9283 2026-03-10T13:27:02.252 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:27:02 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: [10/Mar/2026:13:27:02] ENGINE Bus STARTED 2026-03-10T13:27:02.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:01 vm00 systemd[1]: Starting Ceph mon.c for 98a3dada-1c81-11f1-89c9-d57c120f78d5... 2026-03-10T13:27:02.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 podman[96279]: 2026-03-10 13:27:02.021894236 +0000 UTC m=+0.020100228 container create 43deda66dee35e12d8896091c92b05e1e68ed5aea0df0457ccc3ba3237b80fdb (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mon-c, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, ceph=True, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0) 2026-03-10T13:27:02.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 podman[96279]: 2026-03-10 13:27:02.060503323 +0000 UTC m=+0.058709305 container init 43deda66dee35e12d8896091c92b05e1e68ed5aea0df0457ccc3ba3237b80fdb (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mon-c, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, ceph=True, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9) 2026-03-10T13:27:02.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 podman[96279]: 2026-03-10 13:27:02.063266756 +0000 UTC m=+0.061472737 container start 43deda66dee35e12d8896091c92b05e1e68ed5aea0df0457ccc3ba3237b80fdb 
(image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mon-c, CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-10T13:27:02.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 bash[96279]: 43deda66dee35e12d8896091c92b05e1e68ed5aea0df0457ccc3ba3237b80fdb 2026-03-10T13:27:02.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 podman[96279]: 2026-03-10 13:27:02.011231416 +0000 UTC m=+0.009437408 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:27:02.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 systemd[1]: Started Ceph mon.c for 98a3dada-1c81-11f1-89c9-d57c120f78d5. 2026-03-10T13:27:02.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: set uid:gid to 167:167 (ceph:ceph) 2026-03-10T13:27:02.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable), process ceph-mon, pid 2 2026-03-10T13:27:02.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: pidfile_write: ignore empty --pid-file 2026-03-10T13:27:02.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: load: jerasure load: lrc 2026-03-10T13:27:02.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: RocksDB version: 7.9.2 2026-03-10T13:27:02.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Git sha 0 2026-03-10T13:27:02.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Compile date 2026-02-25 18:11:04 2026-03-10T13:27:02.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: DB SUMMARY 2026-03-10T13:27:02.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: DB Session ID: 7Y8Y1LE9CRXJ2PS28K2B 2026-03-10T13:27:02.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: CURRENT file: CURRENT 2026-03-10T13:27:02.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: IDENTITY file: IDENTITY 2026-03-10T13:27:02.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: MANIFEST file: MANIFEST-000009 size: 2258 Bytes 2026-03-10T13:27:02.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: SST files in /var/lib/ceph/mon/ceph-c/store.db dir, Total Num: 1, files: 000045.sst 2026-03-10T13:27:02.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Write Ahead Log file in /var/lib/ceph/mon/ceph-c/store.db: 000043.log size: 1065176 ; 2026-03-10T13:27:02.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 
vm00 ceph-mon[96293]: rocksdb: Options.error_if_exists: 0 2026-03-10T13:27:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.create_if_missing: 0 2026-03-10T13:27:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.paranoid_checks: 1 2026-03-10T13:27:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.flush_verify_memtable_count: 1 2026-03-10T13:27:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.track_and_verify_wals_in_manifest: 0 2026-03-10T13:27:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.verify_sst_unique_id_in_manifest: 1 2026-03-10T13:27:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.env: 0x555a07cb0dc0 2026-03-10T13:27:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.fs: PosixFileSystem 2026-03-10T13:27:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.info_log: 0x555a09aa45c0 2026-03-10T13:27:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.max_file_opening_threads: 16 2026-03-10T13:27:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.statistics: (nil) 2026-03-10T13:27:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.use_fsync: 0 2026-03-10T13:27:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.max_log_file_size: 0 2026-03-10T13:27:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.max_manifest_file_size: 1073741824 2026-03-10T13:27:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.log_file_time_to_roll: 0 2026-03-10T13:27:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.keep_log_file_num: 1000 2026-03-10T13:27:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.recycle_log_file_num: 0 2026-03-10T13:27:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.allow_fallocate: 1 2026-03-10T13:27:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.allow_mmap_reads: 0 2026-03-10T13:27:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.allow_mmap_writes: 0 2026-03-10T13:27:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.use_direct_reads: 0 2026-03-10T13:27:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.use_direct_io_for_flush_and_compaction: 0 2026-03-10T13:27:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.create_missing_column_families: 0 2026-03-10T13:27:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.db_log_dir: 2026-03-10T13:27:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.wal_dir: 2026-03-10T13:27:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 
ceph-mon[96293]: rocksdb: Options.table_cache_numshardbits: 6 2026-03-10T13:27:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.WAL_ttl_seconds: 0 2026-03-10T13:27:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.WAL_size_limit_MB: 0 2026-03-10T13:27:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.max_write_batch_group_size_bytes: 1048576 2026-03-10T13:27:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.manifest_preallocation_size: 4194304 2026-03-10T13:27:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.is_fd_close_on_exec: 1 2026-03-10T13:27:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.advise_random_on_open: 1 2026-03-10T13:27:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.db_write_buffer_size: 0 2026-03-10T13:27:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.write_buffer_manager: 0x555a09aa9900 2026-03-10T13:27:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.access_hint_on_compaction_start: 1 2026-03-10T13:27:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.random_access_max_buffer_size: 1048576 2026-03-10T13:27:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.use_adaptive_mutex: 0 2026-03-10T13:27:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.rate_limiter: (nil) 2026-03-10T13:27:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.sst_file_manager.rate_bytes_per_sec: 0 2026-03-10T13:27:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.wal_recovery_mode: 2 2026-03-10T13:27:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.enable_thread_tracking: 0 2026-03-10T13:27:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.enable_pipelined_write: 0 2026-03-10T13:27:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.unordered_write: 0 2026-03-10T13:27:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.allow_concurrent_memtable_write: 1 2026-03-10T13:27:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.enable_write_thread_adaptive_yield: 1 2026-03-10T13:27:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.write_thread_max_yield_usec: 100 2026-03-10T13:27:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.write_thread_slow_yield_usec: 3 2026-03-10T13:27:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.row_cache: None 2026-03-10T13:27:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.wal_filter: None 2026-03-10T13:27:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: 
Options.avoid_flush_during_recovery: 0 2026-03-10T13:27:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.allow_ingest_behind: 0 2026-03-10T13:27:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.two_write_queues: 0 2026-03-10T13:27:02.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.manual_wal_flush: 0 2026-03-10T13:27:02.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.wal_compression: 0 2026-03-10T13:27:02.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.atomic_flush: 0 2026-03-10T13:27:02.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.avoid_unnecessary_blocking_io: 0 2026-03-10T13:27:02.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.persist_stats_to_disk: 0 2026-03-10T13:27:02.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.write_dbid_to_manifest: 0 2026-03-10T13:27:02.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.log_readahead_size: 0 2026-03-10T13:27:02.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.file_checksum_gen_factory: Unknown 2026-03-10T13:27:02.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.best_efforts_recovery: 0 2026-03-10T13:27:02.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.max_bgerror_resume_count: 2147483647 2026-03-10T13:27:02.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.bgerror_resume_retry_interval: 1000000 2026-03-10T13:27:02.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.allow_data_in_errors: 0 2026-03-10T13:27:02.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.db_host_id: __hostname__ 2026-03-10T13:27:02.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.enforce_single_del_contracts: true 2026-03-10T13:27:02.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.max_background_jobs: 2 2026-03-10T13:27:02.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.max_background_compactions: -1 2026-03-10T13:27:02.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.max_subcompactions: 1 2026-03-10T13:27:02.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.avoid_flush_during_shutdown: 0 2026-03-10T13:27:02.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.writable_file_max_buffer_size: 1048576 2026-03-10T13:27:02.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.delayed_write_rate : 16777216 2026-03-10T13:27:02.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.max_total_wal_size: 0 2026-03-10T13:27:02.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.delete_obsolete_files_period_micros: 21600000000 
2026-03-10T13:27:02.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.stats_dump_period_sec: 600 2026-03-10T13:27:02.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.stats_persist_period_sec: 600 2026-03-10T13:27:02.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.stats_history_buffer_size: 1048576 2026-03-10T13:27:02.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.max_open_files: -1 2026-03-10T13:27:02.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.bytes_per_sync: 0 2026-03-10T13:27:02.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.wal_bytes_per_sync: 0 2026-03-10T13:27:02.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.strict_bytes_per_sync: 0 2026-03-10T13:27:02.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.compaction_readahead_size: 0 2026-03-10T13:27:02.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.max_background_flushes: -1 2026-03-10T13:27:02.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Compression algorithms supported: 2026-03-10T13:27:02.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: kZSTD supported: 0 2026-03-10T13:27:02.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: kXpressCompression supported: 0 2026-03-10T13:27:02.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: kBZip2Compression supported: 0 2026-03-10T13:27:02.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: kZSTDNotFinalCompression supported: 0 2026-03-10T13:27:02.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: kLZ4Compression supported: 1 2026-03-10T13:27:02.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: kZlibCompression supported: 1 2026-03-10T13:27:02.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: kLZ4HCCompression supported: 1 2026-03-10T13:27:02.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: kSnappyCompression supported: 1 2026-03-10T13:27:02.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Fast CRC32 supported: Supported on x86 2026-03-10T13:27:02.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: DMutex implementation: pthread_mutex_t 2026-03-10T13:27:02.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: [db/version_set.cc:5527] Recovering from manifest file: /var/lib/ceph/mon/ceph-c/store.db/MANIFEST-000009 2026-03-10T13:27:02.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [default]: 2026-03-10T13:27:02.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.comparator: leveldb.BytewiseComparator 2026-03-10T13:27:02.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.merge_operator: 
2026-03-10T13:27:02.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.compaction_filter: None 2026-03-10T13:27:02.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.compaction_filter_factory: None 2026-03-10T13:27:02.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.sst_partitioner_factory: None 2026-03-10T13:27:02.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.memtable_factory: SkipListFactory 2026-03-10T13:27:02.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.table_factory: BlockBasedTable 2026-03-10T13:27:02.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: table_factory options: flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x555a09aa45a0) 2026-03-10T13:27:02.255 INFO:journalctl@ceph.mon.c.vm00.stdout: cache_index_and_filter_blocks: 1 2026-03-10T13:27:02.255 INFO:journalctl@ceph.mon.c.vm00.stdout: cache_index_and_filter_blocks_with_high_priority: 0 2026-03-10T13:27:02.255 INFO:journalctl@ceph.mon.c.vm00.stdout: pin_l0_filter_and_index_blocks_in_cache: 0 2026-03-10T13:27:02.255 INFO:journalctl@ceph.mon.c.vm00.stdout: pin_top_level_index_and_filter: 1 2026-03-10T13:27:02.255 INFO:journalctl@ceph.mon.c.vm00.stdout: index_type: 0 2026-03-10T13:27:02.255 INFO:journalctl@ceph.mon.c.vm00.stdout: data_block_index_type: 0 2026-03-10T13:27:02.255 INFO:journalctl@ceph.mon.c.vm00.stdout: index_shortening: 1 2026-03-10T13:27:02.255 INFO:journalctl@ceph.mon.c.vm00.stdout: data_block_hash_table_util_ratio: 0.750000 2026-03-10T13:27:02.255 INFO:journalctl@ceph.mon.c.vm00.stdout: checksum: 4 2026-03-10T13:27:02.255 INFO:journalctl@ceph.mon.c.vm00.stdout: no_block_cache: 0 2026-03-10T13:27:02.255 INFO:journalctl@ceph.mon.c.vm00.stdout: block_cache: 0x555a09ac9350 2026-03-10T13:27:02.256 INFO:journalctl@ceph.mon.c.vm00.stdout: block_cache_name: BinnedLRUCache 2026-03-10T13:27:02.256 INFO:journalctl@ceph.mon.c.vm00.stdout: block_cache_options: 2026-03-10T13:27:02.256 INFO:journalctl@ceph.mon.c.vm00.stdout: capacity : 536870912 2026-03-10T13:27:02.256 INFO:journalctl@ceph.mon.c.vm00.stdout: num_shard_bits : 4 2026-03-10T13:27:02.256 INFO:journalctl@ceph.mon.c.vm00.stdout: strict_capacity_limit : 0 2026-03-10T13:27:02.256 INFO:journalctl@ceph.mon.c.vm00.stdout: high_pri_pool_ratio: 0.000 2026-03-10T13:27:02.256 INFO:journalctl@ceph.mon.c.vm00.stdout: block_cache_compressed: (nil) 2026-03-10T13:27:02.256 INFO:journalctl@ceph.mon.c.vm00.stdout: persistent_cache: (nil) 2026-03-10T13:27:02.256 INFO:journalctl@ceph.mon.c.vm00.stdout: block_size: 4096 2026-03-10T13:27:02.256 INFO:journalctl@ceph.mon.c.vm00.stdout: block_size_deviation: 10 2026-03-10T13:27:02.256 INFO:journalctl@ceph.mon.c.vm00.stdout: block_restart_interval: 16 2026-03-10T13:27:02.256 INFO:journalctl@ceph.mon.c.vm00.stdout: index_block_restart_interval: 1 2026-03-10T13:27:02.256 INFO:journalctl@ceph.mon.c.vm00.stdout: metadata_block_size: 4096 2026-03-10T13:27:02.256 INFO:journalctl@ceph.mon.c.vm00.stdout: partition_filters: 0 2026-03-10T13:27:02.256 INFO:journalctl@ceph.mon.c.vm00.stdout: use_delta_encoding: 1 2026-03-10T13:27:02.256 INFO:journalctl@ceph.mon.c.vm00.stdout: filter_policy: bloomfilter 2026-03-10T13:27:02.256 INFO:journalctl@ceph.mon.c.vm00.stdout: whole_key_filtering: 1 2026-03-10T13:27:02.256 INFO:journalctl@ceph.mon.c.vm00.stdout: 
verify_compression: 0 2026-03-10T13:27:02.256 INFO:journalctl@ceph.mon.c.vm00.stdout: read_amp_bytes_per_bit: 0 2026-03-10T13:27:02.256 INFO:journalctl@ceph.mon.c.vm00.stdout: format_version: 5 2026-03-10T13:27:02.256 INFO:journalctl@ceph.mon.c.vm00.stdout: enable_index_compression: 1 2026-03-10T13:27:02.256 INFO:journalctl@ceph.mon.c.vm00.stdout: block_align: 0 2026-03-10T13:27:02.256 INFO:journalctl@ceph.mon.c.vm00.stdout: max_auto_readahead_size: 262144 2026-03-10T13:27:02.256 INFO:journalctl@ceph.mon.c.vm00.stdout: prepopulate_block_cache: 0 2026-03-10T13:27:02.256 INFO:journalctl@ceph.mon.c.vm00.stdout: initial_auto_readahead_size: 8192 2026-03-10T13:27:02.256 INFO:journalctl@ceph.mon.c.vm00.stdout: num_file_reads_for_auto_readahead: 2 2026-03-10T13:27:02.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.write_buffer_size: 33554432 2026-03-10T13:27:02.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.max_write_buffer_number: 2 2026-03-10T13:27:02.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.compression: NoCompression 2026-03-10T13:27:02.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.bottommost_compression: Disabled 2026-03-10T13:27:02.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.prefix_extractor: nullptr 2026-03-10T13:27:02.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.memtable_insert_with_hint_prefix_extractor: nullptr 2026-03-10T13:27:02.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.num_levels: 7 2026-03-10T13:27:02.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.min_write_buffer_number_to_merge: 1 2026-03-10T13:27:02.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.max_write_buffer_number_to_maintain: 0 2026-03-10T13:27:02.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.max_write_buffer_size_to_maintain: 0 2026-03-10T13:27:02.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.bottommost_compression_opts.window_bits: -14 2026-03-10T13:27:02.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.bottommost_compression_opts.level: 32767 2026-03-10T13:27:02.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.bottommost_compression_opts.strategy: 0 2026-03-10T13:27:02.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.bottommost_compression_opts.max_dict_bytes: 0 2026-03-10T13:27:02.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.bottommost_compression_opts.zstd_max_train_bytes: 0 2026-03-10T13:27:02.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.bottommost_compression_opts.parallel_threads: 1 2026-03-10T13:27:02.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.bottommost_compression_opts.enabled: false 2026-03-10T13:27:02.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: 
Options.bottommost_compression_opts.max_dict_buffer_bytes: 0 2026-03-10T13:27:02.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.bottommost_compression_opts.use_zstd_dict_trainer: true 2026-03-10T13:27:02.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.compression_opts.window_bits: -14 2026-03-10T13:27:02.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.compression_opts.level: 32767 2026-03-10T13:27:02.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.compression_opts.strategy: 0 2026-03-10T13:27:02.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.compression_opts.max_dict_bytes: 0 2026-03-10T13:27:02.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.compression_opts.zstd_max_train_bytes: 0 2026-03-10T13:27:02.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.compression_opts.use_zstd_dict_trainer: true 2026-03-10T13:27:02.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.compression_opts.parallel_threads: 1 2026-03-10T13:27:02.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.compression_opts.enabled: false 2026-03-10T13:27:02.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.compression_opts.max_dict_buffer_bytes: 0 2026-03-10T13:27:02.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.level0_file_num_compaction_trigger: 4 2026-03-10T13:27:02.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.level0_slowdown_writes_trigger: 20 2026-03-10T13:27:02.257 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.level0_stop_writes_trigger: 36 2026-03-10T13:27:02.257 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.target_file_size_base: 67108864 2026-03-10T13:27:02.257 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.target_file_size_multiplier: 1 2026-03-10T13:27:02.257 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.max_bytes_for_level_base: 268435456 2026-03-10T13:27:02.257 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.level_compaction_dynamic_level_bytes: 1 2026-03-10T13:27:02.257 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.max_bytes_for_level_multiplier: 10.000000 2026-03-10T13:27:02.257 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1 2026-03-10T13:27:02.257 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1 2026-03-10T13:27:02.257 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1 2026-03-10T13:27:02.257 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1 2026-03-10T13:27:02.257 
INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1 2026-03-10T13:27:02.257 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1 2026-03-10T13:27:02.257 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1 2026-03-10T13:27:02.257 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.max_sequential_skip_in_iterations: 8 2026-03-10T13:27:02.257 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.max_compaction_bytes: 1677721600 2026-03-10T13:27:02.257 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.ignore_max_compaction_bytes_for_input: true 2026-03-10T13:27:02.257 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.arena_block_size: 1048576 2026-03-10T13:27:02.257 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.soft_pending_compaction_bytes_limit: 68719476736 2026-03-10T13:27:02.257 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.hard_pending_compaction_bytes_limit: 274877906944 2026-03-10T13:27:02.257 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.disable_auto_compactions: 0 2026-03-10T13:27:02.257 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.compaction_style: kCompactionStyleLevel 2026-03-10T13:27:02.257 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.compaction_pri: kMinOverlappingRatio 2026-03-10T13:27:02.257 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.compaction_options_universal.size_ratio: 1 2026-03-10T13:27:02.257 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.compaction_options_universal.min_merge_width: 2 2026-03-10T13:27:02.257 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295 2026-03-10T13:27:02.257 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200 2026-03-10T13:27:02.257 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1 2026-03-10T13:27:02.257 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize 2026-03-10T13:27:02.257 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824 2026-03-10T13:27:02.257 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0 2026-03-10T13:27:02.257 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0); 2026-03-10T13:27:02.257 
INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.inplace_update_support: 0 2026-03-10T13:27:02.257 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.inplace_update_num_locks: 10000 2026-03-10T13:27:02.257 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.memtable_prefix_bloom_size_ratio: 0.000000 2026-03-10T13:27:02.257 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.memtable_whole_key_filtering: 0 2026-03-10T13:27:02.257 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.memtable_huge_page_size: 0 2026-03-10T13:27:02.257 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.bloom_locality: 0 2026-03-10T13:27:02.257 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.max_successive_merges: 0 2026-03-10T13:27:02.257 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.optimize_filters_for_hits: 0 2026-03-10T13:27:02.257 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.paranoid_file_checks: 0 2026-03-10T13:27:02.257 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.force_consistency_checks: 1 2026-03-10T13:27:02.257 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.report_bg_io_stats: 0 2026-03-10T13:27:02.257 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.ttl: 2592000 2026-03-10T13:27:02.257 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.periodic_compaction_seconds: 0 2026-03-10T13:27:02.257 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.preclude_last_level_data_seconds: 0 2026-03-10T13:27:02.257 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.preserve_internal_time_seconds: 0 2026-03-10T13:27:02.257 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.enable_blob_files: false 2026-03-10T13:27:02.257 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.min_blob_size: 0 2026-03-10T13:27:02.257 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.blob_file_size: 268435456 2026-03-10T13:27:02.257 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.blob_compression_type: NoCompression 2026-03-10T13:27:02.257 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.enable_blob_garbage_collection: false 2026-03-10T13:27:02.257 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.blob_garbage_collection_age_cutoff: 0.250000 2026-03-10T13:27:02.257 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000 2026-03-10T13:27:02.257 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.blob_compaction_readahead_size: 0 2026-03-10T13:27:02.257 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.blob_file_starting_level: 0 2026-03-10T13:27:02.257 
INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: Options.experimental_mempurge_threshold: 0.000000 2026-03-10T13:27:02.257 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: [table/block_based/block_based_table_reader.cc:721] At least one SST file opened without unique ID to verify: 45.sst 2026-03-10T13:27:02.257 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed. 2026-03-10T13:27:02.257 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: [db/version_set.cc:5566] Recovered from manifest file:/var/lib/ceph/mon/ceph-c/store.db/MANIFEST-000009 succeeded,manifest_file_number is 9, next_file_number is 47, last_sequence is 25128, log_number is 43,prev_log_number is 0,max_column_family is 0,min_log_number_to_keep is 0 2026-03-10T13:27:02.258 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: [db/version_set.cc:5581] Column family [default] (ID 0), log number is 43 2026-03-10T13:27:02.258 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: [db/db_impl/db_impl_open.cc:539] DB ID: a0db31f8-6f6a-4b01-888b-dae5e25761fe 2026-03-10T13:27:02.258 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773149222092784, "job": 1, "event": "recovery_started", "wal_files": [43]} 2026-03-10T13:27:02.258 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: [db/db_impl/db_impl_open.cc:1043] Recovering log #43 mode 2 2026-03-10T13:27:02.258 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773149222097045, "cf_name": "default", "job": 1, "event": "table_file_creation", "file_number": 48, "file_size": 627667, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 25133, "largest_seqno": 25316, "table_properties": {"data_size": 625460, "index_size": 918, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 261, "raw_key_size": 2062, "raw_average_key_size": 24, "raw_value_size": 623068, "raw_average_value_size": 7330, "num_data_blocks": 41, "num_entries": 85, "num_filter_entries": 85, "num_deletions": 0, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1773149222, "oldest_key_time": 0, "file_creation_time": 0, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "a0db31f8-6f6a-4b01-888b-dae5e25761fe", "db_session_id": "7Y8Y1LE9CRXJ2PS28K2B", "orig_file_number": 48, "seqno_to_time_mapping": "N/A"}} 2026-03-10T13:27:02.258 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773149222097121, "job": 1, "event": "recovery_finished"} 
2026-03-10T13:27:02.258 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: [db/version_set.cc:5047] Creating manifest 50 2026-03-10T13:27:02.258 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed. 2026-03-10T13:27:02.258 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-c/store.db/000043.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000 2026-03-10T13:27:02.258 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: [db/db_impl/db_impl_open.cc:1987] SstFileManager instance 0x555a09acae00 2026-03-10T13:27:02.258 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: DB pointer 0x555a09be0000 2026-03-10T13:27:02.258 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS ------- 2026-03-10T13:27:02.258 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: rocksdb: [db/db_impl/db_impl.cc:1111] 2026-03-10T13:27:02.258 INFO:journalctl@ceph.mon.c.vm00.stdout: ** DB Stats ** 2026-03-10T13:27:02.258 INFO:journalctl@ceph.mon.c.vm00.stdout: Uptime(secs): 0.0 total, 0.0 interval 2026-03-10T13:27:02.258 INFO:journalctl@ceph.mon.c.vm00.stdout: Cumulative writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 GB, 0.00 MB/s 2026-03-10T13:27:02.258 INFO:journalctl@ceph.mon.c.vm00.stdout: Cumulative WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s 2026-03-10T13:27:02.258 INFO:journalctl@ceph.mon.c.vm00.stdout: Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent 2026-03-10T13:27:02.258 INFO:journalctl@ceph.mon.c.vm00.stdout: Interval writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 MB, 0.00 MB/s 2026-03-10T13:27:02.258 INFO:journalctl@ceph.mon.c.vm00.stdout: Interval WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s 2026-03-10T13:27:02.258 INFO:journalctl@ceph.mon.c.vm00.stdout: Interval stall: 00:00:0.000 H:M:S, 0.0 percent 2026-03-10T13:27:02.258 INFO:journalctl@ceph.mon.c.vm00.stdout: 2026-03-10T13:27:02.258 INFO:journalctl@ceph.mon.c.vm00.stdout: ** Compaction Stats [default] ** 2026-03-10T13:27:02.258 INFO:journalctl@ceph.mon.c.vm00.stdout: Level Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB) 2026-03-10T13:27:02.258 INFO:journalctl@ceph.mon.c.vm00.stdout: ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ 2026-03-10T13:27:02.258 INFO:journalctl@ceph.mon.c.vm00.stdout: L0 1/0 612.96 KB 0.2 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 196.1 0.00 0.00 1 0.003 0 0 0.0 0.0 2026-03-10T13:27:02.258 INFO:journalctl@ceph.mon.c.vm00.stdout: L6 1/0 11.15 MB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.00 0.00 0 0.000 0 0 0.0 0.0 2026-03-10T13:27:02.258 INFO:journalctl@ceph.mon.c.vm00.stdout: Sum 2/0 11.74 MB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 196.1 0.00 0.00 1 0.003 0 0 0.0 0.0 2026-03-10T13:27:02.258 INFO:journalctl@ceph.mon.c.vm00.stdout: Int 0/0 0.00 KB 0.0 0.0 0.0 0.0 
0.0 0.0 0.0 1.0 0.0 196.1 0.00 0.00 1 0.003 0 0 0.0 0.0 2026-03-10T13:27:02.258 INFO:journalctl@ceph.mon.c.vm00.stdout: 2026-03-10T13:27:02.258 INFO:journalctl@ceph.mon.c.vm00.stdout: ** Compaction Stats [default] ** 2026-03-10T13:27:02.258 INFO:journalctl@ceph.mon.c.vm00.stdout: Priority Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB) 2026-03-10T13:27:02.258 INFO:journalctl@ceph.mon.c.vm00.stdout: --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 2026-03-10T13:27:02.258 INFO:journalctl@ceph.mon.c.vm00.stdout: User 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 196.1 0.00 0.00 1 0.003 0 0 0.0 0.0 2026-03-10T13:27:02.258 INFO:journalctl@ceph.mon.c.vm00.stdout: 2026-03-10T13:27:02.258 INFO:journalctl@ceph.mon.c.vm00.stdout: Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0 2026-03-10T13:27:02.258 INFO:journalctl@ceph.mon.c.vm00.stdout: 2026-03-10T13:27:02.258 INFO:journalctl@ceph.mon.c.vm00.stdout: Uptime(secs): 0.0 total, 0.0 interval 2026-03-10T13:27:02.258 INFO:journalctl@ceph.mon.c.vm00.stdout: Flush(GB): cumulative 0.001, interval 0.001 2026-03-10T13:27:02.258 INFO:journalctl@ceph.mon.c.vm00.stdout: AddFile(GB): cumulative 0.000, interval 0.000 2026-03-10T13:27:02.258 INFO:journalctl@ceph.mon.c.vm00.stdout: AddFile(Total Files): cumulative 0, interval 0 2026-03-10T13:27:02.258 INFO:journalctl@ceph.mon.c.vm00.stdout: AddFile(L0 Files): cumulative 0, interval 0 2026-03-10T13:27:02.258 INFO:journalctl@ceph.mon.c.vm00.stdout: AddFile(Keys): cumulative 0, interval 0 2026-03-10T13:27:02.258 INFO:journalctl@ceph.mon.c.vm00.stdout: Cumulative compaction: 0.00 GB write, 46.75 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds 2026-03-10T13:27:02.258 INFO:journalctl@ceph.mon.c.vm00.stdout: Interval compaction: 0.00 GB write, 46.75 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds 2026-03-10T13:27:02.258 INFO:journalctl@ceph.mon.c.vm00.stdout: Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count 2026-03-10T13:27:02.258 INFO:journalctl@ceph.mon.c.vm00.stdout: Block cache BinnedLRUCache@0x555a09ac9350#2 capacity: 512.00 MB usage: 1.39 KB table_size: 0 occupancy: 18446744073709551615 collections: 1 last_copies: 0 last_secs: 1.2e-05 secs_since: 0 2026-03-10T13:27:02.258 INFO:journalctl@ceph.mon.c.vm00.stdout: Block cache entry stats(count,size,portion): FilterBlock(1,0.31 KB,5.96046e-05%) IndexBlock(1,1.08 KB,0.000205636%) Misc(1,0.00 KB,0%) 2026-03-10T13:27:02.258 INFO:journalctl@ceph.mon.c.vm00.stdout: 2026-03-10T13:27:02.258 INFO:journalctl@ceph.mon.c.vm00.stdout: ** File Read Latency Histogram By Level [default] ** 2026-03-10T13:27:02.258 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: starting mon.c rank 1 at public addrs [v2:192.168.123.100:3301/0,v1:192.168.123.100:6790/0] at bind addrs [v2:192.168.123.100:3301/0,v1:192.168.123.100:6790/0] mon_data /var/lib/ceph/mon/ceph-c fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 2026-03-10T13:27:02.258 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: mon.c@-1(???) 
e3 preinit fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 2026-03-10T13:27:02.258 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: mon.c@-1(???).mds e1 new map 2026-03-10T13:27:02.258 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: mon.c@-1(???).mds e1 print_map 2026-03-10T13:27:02.258 INFO:journalctl@ceph.mon.c.vm00.stdout: e1 2026-03-10T13:27:02.258 INFO:journalctl@ceph.mon.c.vm00.stdout: btime 1970-01-01T00:00:00:000000+0000 2026-03-10T13:27:02.258 INFO:journalctl@ceph.mon.c.vm00.stdout: enable_multiple, ever_enabled_multiple: 1,1 2026-03-10T13:27:02.258 INFO:journalctl@ceph.mon.c.vm00.stdout: default compat: compat={},rocompat={},incompat={1=base v0.20,2=client writeable ranges,3=default file layouts on dirs,4=dir inode in separate object,5=mds uses versioned encoding,6=dirfrag is stored in omap,8=no anchor table,9=file layout v2,10=snaprealm v2} 2026-03-10T13:27:02.258 INFO:journalctl@ceph.mon.c.vm00.stdout: legacy client fscid: -1 2026-03-10T13:27:02.258 INFO:journalctl@ceph.mon.c.vm00.stdout: 2026-03-10T13:27:02.258 INFO:journalctl@ceph.mon.c.vm00.stdout: No filesystems configured 2026-03-10T13:27:02.259 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: mon.c@-1(???).osd e95 crush map has features 3314933000854323200, adjusting msgr requires 2026-03-10T13:27:02.259 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: mon.c@-1(???).osd e95 crush map has features 432629239337189376, adjusting msgr requires 2026-03-10T13:27:02.259 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: mon.c@-1(???).osd e95 crush map has features 432629239337189376, adjusting msgr requires 2026-03-10T13:27:02.259 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: mon.c@-1(???).osd e95 crush map has features 432629239337189376, adjusting msgr requires 2026-03-10T13:27:02.259 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: mon.c@-1(???).paxosservice(auth 1..25) refresh upgraded, format 0 -> 3 2026-03-10T13:27:02.586 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[94470]: Updating vm00:/etc/ceph/ceph.conf 2026-03-10T13:27:02.586 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[94470]: Updating vm08:/etc/ceph/ceph.conf 2026-03-10T13:27:02.586 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[94470]: Updating vm08:/var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/config/ceph.conf 2026-03-10T13:27:02.586 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[94470]: Updating vm00:/var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/config/ceph.conf 2026-03-10T13:27:02.586 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[94470]: Updating vm08:/etc/ceph/ceph.client.admin.keyring 2026-03-10T13:27:02.586 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[94470]: Updating vm00:/etc/ceph/ceph.client.admin.keyring 2026-03-10T13:27:02.586 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[94470]: Updating vm08:/var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/config/ceph.client.admin.keyring 2026-03-10T13:27:02.586 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[94470]: Updating vm00:/var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/config/ceph.client.admin.keyring 2026-03-10T13:27:02.586 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[94470]: from='mgr.44106 ' 
entity='mgr.y' 2026-03-10T13:27:02.586 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:02.586 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:02.586 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:02.586 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:02.586 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:27:02.586 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:02.586 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "quorum_status"}]: dispatch 2026-03-10T13:27:02.586 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "mon ok-to-stop", "ids": ["c"]}]: dispatch 2026-03-10T13:27:02.586 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[94470]: Upgrade: It appears safe to stop mon.c 2026-03-10T13:27:02.586 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[94470]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:27:02.586 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[94470]: Upgrade: Updating mon.c 2026-03-10T13:27:02.586 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:02.586 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-10T13:27:02.586 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-10T13:27:02.586 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:27:02.586 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[94470]: Deploying daemon mon.c on vm00 2026-03-10T13:27:02.586 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[94470]: pgmap v5: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:27:02.586 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T13:27:02.586 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T13:27:02.586 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:02 vm00 
ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T13:27:02.586 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: Updating vm00:/etc/ceph/ceph.conf 2026-03-10T13:27:02.586 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: Updating vm08:/etc/ceph/ceph.conf 2026-03-10T13:27:02.586 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: Updating vm08:/var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/config/ceph.conf 2026-03-10T13:27:02.586 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: Updating vm00:/var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/config/ceph.conf 2026-03-10T13:27:02.586 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: Updating vm08:/etc/ceph/ceph.client.admin.keyring 2026-03-10T13:27:02.586 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: Updating vm00:/etc/ceph/ceph.client.admin.keyring 2026-03-10T13:27:02.586 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: Updating vm08:/var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/config/ceph.client.admin.keyring 2026-03-10T13:27:02.586 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: Updating vm00:/var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/config/ceph.client.admin.keyring 2026-03-10T13:27:02.586 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:02.586 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:02.586 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:02.586 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:02.586 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:02.586 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:27:02.586 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:02.586 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "quorum_status"}]: dispatch 2026-03-10T13:27:02.586 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "mon ok-to-stop", "ids": ["c"]}]: dispatch 2026-03-10T13:27:02.586 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: Upgrade: It appears safe to stop mon.c 2026-03-10T13:27:02.586 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:27:02.587 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: Upgrade: Updating mon.c 2026-03-10T13:27:02.587 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 
13:27:02 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:02.587 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-10T13:27:02.587 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-10T13:27:02.587 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:27:02.587 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: Deploying daemon mon.c on vm00 2026-03-10T13:27:02.587 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: pgmap v5: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:27:02.587 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T13:27:02.587 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T13:27:02.587 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:02 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T13:27:02.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:02 vm08 ceph-mon[82639]: Updating vm00:/etc/ceph/ceph.conf 2026-03-10T13:27:02.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:02 vm08 ceph-mon[82639]: Updating vm08:/etc/ceph/ceph.conf 2026-03-10T13:27:02.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:02 vm08 ceph-mon[82639]: Updating vm08:/var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/config/ceph.conf 2026-03-10T13:27:02.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:02 vm08 ceph-mon[82639]: Updating vm00:/var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/config/ceph.conf 2026-03-10T13:27:02.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:02 vm08 ceph-mon[82639]: Updating vm08:/etc/ceph/ceph.client.admin.keyring 2026-03-10T13:27:02.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:02 vm08 ceph-mon[82639]: Updating vm00:/etc/ceph/ceph.client.admin.keyring 2026-03-10T13:27:02.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:02 vm08 ceph-mon[82639]: Updating vm08:/var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/config/ceph.client.admin.keyring 2026-03-10T13:27:02.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:02 vm08 ceph-mon[82639]: Updating vm00:/var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/config/ceph.client.admin.keyring 2026-03-10T13:27:02.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:02 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:02.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:02 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:02.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:02 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:02.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:02 vm08 ceph-mon[82639]: 
from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:02.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:02 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:02.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:02 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:27:02.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:02 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:02.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:02 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "quorum_status"}]: dispatch 2026-03-10T13:27:02.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:02 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "mon ok-to-stop", "ids": ["c"]}]: dispatch 2026-03-10T13:27:02.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:02 vm08 ceph-mon[82639]: Upgrade: It appears safe to stop mon.c 2026-03-10T13:27:02.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:02 vm08 ceph-mon[82639]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:27:02.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:02 vm08 ceph-mon[82639]: Upgrade: Updating mon.c 2026-03-10T13:27:02.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:02 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:02.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:02 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-10T13:27:02.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:02 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-10T13:27:02.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:02 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:27:02.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:02 vm08 ceph-mon[82639]: Deploying daemon mon.c on vm00 2026-03-10T13:27:02.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:02 vm08 ceph-mon[82639]: pgmap v5: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:27:02.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:02 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T13:27:02.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:02 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T13:27:02.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:02 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T13:27:03.408 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:03 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T13:27:03.408 
INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:03 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T13:27:03.408 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:03 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T13:27:03.408 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:03 vm00 ceph-mon[94470]: mon.a calling monitor election 2026-03-10T13:27:03.408 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:03 vm00 ceph-mon[94470]: mon.b calling monitor election 2026-03-10T13:27:03.408 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:03 vm00 ceph-mon[94470]: mon.c calling monitor election 2026-03-10T13:27:03.408 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:03 vm00 ceph-mon[94470]: mon.a is new leader, mons a,c,b in quorum (ranks 0,1,2) 2026-03-10T13:27:03.408 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:03 vm00 ceph-mon[94470]: monmap epoch 4 2026-03-10T13:27:03.408 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:03 vm00 ceph-mon[94470]: fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 2026-03-10T13:27:03.408 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:03 vm00 ceph-mon[94470]: last_changed 2026-03-10T13:27:02.297636+0000 2026-03-10T13:27:03.408 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:03 vm00 ceph-mon[94470]: created 2026-03-10T13:04:24.045109+0000 2026-03-10T13:27:03.408 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:03 vm00 ceph-mon[94470]: min_mon_release 19 (squid) 2026-03-10T13:27:03.408 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:03 vm00 ceph-mon[94470]: election_strategy: 1 2026-03-10T13:27:03.408 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:03 vm00 ceph-mon[94470]: 0: [v2:192.168.123.100:3300/0,v1:192.168.123.100:6789/0] mon.a 2026-03-10T13:27:03.408 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:03 vm00 ceph-mon[94470]: 1: [v2:192.168.123.100:3301/0,v1:192.168.123.100:6790/0] mon.c 2026-03-10T13:27:03.408 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:03 vm00 ceph-mon[94470]: 2: [v2:192.168.123.108:3300/0,v1:192.168.123.108:6789/0] mon.b 2026-03-10T13:27:03.408 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:03 vm00 ceph-mon[94470]: fsmap 2026-03-10T13:27:03.408 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:03 vm00 ceph-mon[94470]: osdmap e95: 8 total, 8 up, 8 in 2026-03-10T13:27:03.408 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:03 vm00 ceph-mon[94470]: mgrmap e44: y(active, since 5s), standbys: x 2026-03-10T13:27:03.408 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:03 vm00 ceph-mon[94470]: overall HEALTH_OK 2026-03-10T13:27:03.408 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:03 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='' 2026-03-10T13:27:03.408 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:03 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:03.408 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:03 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T13:27:03.408 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:03 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T13:27:03.408 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:03 vm00 ceph-mon[96293]: 
from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T13:27:03.408 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:03 vm00 ceph-mon[96293]: mon.a calling monitor election 2026-03-10T13:27:03.408 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:03 vm00 ceph-mon[96293]: mon.b calling monitor election 2026-03-10T13:27:03.408 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:03 vm00 ceph-mon[96293]: mon.c calling monitor election 2026-03-10T13:27:03.408 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:03 vm00 ceph-mon[96293]: mon.a is new leader, mons a,c,b in quorum (ranks 0,1,2) 2026-03-10T13:27:03.408 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:03 vm00 ceph-mon[96293]: monmap epoch 4 2026-03-10T13:27:03.408 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:03 vm00 ceph-mon[96293]: fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 2026-03-10T13:27:03.408 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:03 vm00 ceph-mon[96293]: last_changed 2026-03-10T13:27:02.297636+0000 2026-03-10T13:27:03.408 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:03 vm00 ceph-mon[96293]: created 2026-03-10T13:04:24.045109+0000 2026-03-10T13:27:03.408 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:03 vm00 ceph-mon[96293]: min_mon_release 19 (squid) 2026-03-10T13:27:03.408 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:03 vm00 ceph-mon[96293]: election_strategy: 1 2026-03-10T13:27:03.408 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:03 vm00 ceph-mon[96293]: 0: [v2:192.168.123.100:3300/0,v1:192.168.123.100:6789/0] mon.a 2026-03-10T13:27:03.408 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:03 vm00 ceph-mon[96293]: 1: [v2:192.168.123.100:3301/0,v1:192.168.123.100:6790/0] mon.c 2026-03-10T13:27:03.408 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:03 vm00 ceph-mon[96293]: 2: [v2:192.168.123.108:3300/0,v1:192.168.123.108:6789/0] mon.b 2026-03-10T13:27:03.408 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:03 vm00 ceph-mon[96293]: fsmap 2026-03-10T13:27:03.408 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:03 vm00 ceph-mon[96293]: osdmap e95: 8 total, 8 up, 8 in 2026-03-10T13:27:03.408 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:03 vm00 ceph-mon[96293]: mgrmap e44: y(active, since 5s), standbys: x 2026-03-10T13:27:03.408 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:03 vm00 ceph-mon[96293]: overall HEALTH_OK 2026-03-10T13:27:03.409 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:03 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='' 2026-03-10T13:27:03.409 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:03 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:03.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:03 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T13:27:03.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:03 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T13:27:03.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:03 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T13:27:03.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:03 vm08 ceph-mon[82639]: mon.a calling monitor election 2026-03-10T13:27:03.771 
INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:03 vm08 ceph-mon[82639]: mon.b calling monitor election 2026-03-10T13:27:03.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:03 vm08 ceph-mon[82639]: mon.c calling monitor election 2026-03-10T13:27:03.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:03 vm08 ceph-mon[82639]: mon.a is new leader, mons a,c,b in quorum (ranks 0,1,2) 2026-03-10T13:27:03.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:03 vm08 ceph-mon[82639]: monmap epoch 4 2026-03-10T13:27:03.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:03 vm08 ceph-mon[82639]: fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 2026-03-10T13:27:03.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:03 vm08 ceph-mon[82639]: last_changed 2026-03-10T13:27:02.297636+0000 2026-03-10T13:27:03.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:03 vm08 ceph-mon[82639]: created 2026-03-10T13:04:24.045109+0000 2026-03-10T13:27:03.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:03 vm08 ceph-mon[82639]: min_mon_release 19 (squid) 2026-03-10T13:27:03.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:03 vm08 ceph-mon[82639]: election_strategy: 1 2026-03-10T13:27:03.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:03 vm08 ceph-mon[82639]: 0: [v2:192.168.123.100:3300/0,v1:192.168.123.100:6789/0] mon.a 2026-03-10T13:27:03.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:03 vm08 ceph-mon[82639]: 1: [v2:192.168.123.100:3301/0,v1:192.168.123.100:6790/0] mon.c 2026-03-10T13:27:03.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:03 vm08 ceph-mon[82639]: 2: [v2:192.168.123.108:3300/0,v1:192.168.123.108:6789/0] mon.b 2026-03-10T13:27:03.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:03 vm08 ceph-mon[82639]: fsmap 2026-03-10T13:27:03.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:03 vm08 ceph-mon[82639]: osdmap e95: 8 total, 8 up, 8 in 2026-03-10T13:27:03.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:03 vm08 ceph-mon[82639]: mgrmap e44: y(active, since 5s), standbys: x 2026-03-10T13:27:03.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:03 vm08 ceph-mon[82639]: overall HEALTH_OK 2026-03-10T13:27:03.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:03 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='' 2026-03-10T13:27:03.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:03 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:04.334 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:04 vm00 ceph-mon[94470]: pgmap v6: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:27:04.334 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:04 vm00 ceph-mon[94470]: mgrmap e45: y(active, since 6s), standbys: x 2026-03-10T13:27:04.334 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:04 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:04.334 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:04 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:04.334 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:04 vm00 ceph-mon[96293]: pgmap v6: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:27:04.334 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:04 vm00 ceph-mon[96293]: mgrmap e45: y(active, since 6s), standbys: x 2026-03-10T13:27:04.334 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:04 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:04.334 
INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:04 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:04.334 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:04 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:04.334 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:04 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:04.662 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:04 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:04.662 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:04 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:04.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:04 vm08 ceph-mon[82639]: pgmap v6: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:27:04.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:04 vm08 ceph-mon[82639]: mgrmap e45: y(active, since 6s), standbys: x 2026-03-10T13:27:04.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:04 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:04.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:04 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:04.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:04 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:04.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:04 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:05.754 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:05 vm00 ceph-mon[94470]: Detected new or changed devices on vm00 2026-03-10T13:27:05.754 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:05 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:05.754 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:05 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:05.754 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:05 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:27:05.754 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:05 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:27:05.754 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:05 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:05.754 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:05 vm00 ceph-mon[94470]: Reconfiguring mon.a (monmap changed)... 
2026-03-10T13:27:05.754 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:05 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-10T13:27:05.754 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:05 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-10T13:27:05.754 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:05 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:27:05.754 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:05 vm00 ceph-mon[94470]: Reconfiguring daemon mon.a on vm00 2026-03-10T13:27:05.754 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:05 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:05.754 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:05 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:05.754 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:05 vm00 ceph-mon[94470]: Reconfiguring mgr.y (monmap changed)... 2026-03-10T13:27:05.754 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:05 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-10T13:27:05.754 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:05 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-10T13:27:05.754 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:05 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-10T13:27:05.754 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:05 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:27:05.754 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:05 vm00 ceph-mon[94470]: Reconfiguring daemon mgr.y on vm00 2026-03-10T13:27:05.754 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:05 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:05.754 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:05 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:05.754 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:05 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-10T13:27:05.754 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:05 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-10T13:27:05.754 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:05 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:27:05.754 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:05 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:05.754 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:05 vm00 ceph-mon[94470]: 
from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:05.755 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:05 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch 2026-03-10T13:27:05.755 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:05 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:27:05.755 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:27:05 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:27:05] "GET /metrics HTTP/1.1" 200 34775 "" "Prometheus/2.51.0" 2026-03-10T13:27:05.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:05 vm00 ceph-mon[96293]: Detected new or changed devices on vm00 2026-03-10T13:27:05.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:05 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:05.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:05 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:05.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:05 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:27:05.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:05 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:27:05.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:05 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:05.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:05 vm00 ceph-mon[96293]: Reconfiguring mon.a (monmap changed)... 2026-03-10T13:27:05.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:05 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-10T13:27:05.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:05 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-10T13:27:05.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:05 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:27:05.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:05 vm00 ceph-mon[96293]: Reconfiguring daemon mon.a on vm00 2026-03-10T13:27:05.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:05 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:05.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:05 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:05.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:05 vm00 ceph-mon[96293]: Reconfiguring mgr.y (monmap changed)... 
2026-03-10T13:27:05.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:05 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-10T13:27:05.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:05 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-10T13:27:05.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:05 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-10T13:27:05.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:05 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:27:05.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:05 vm00 ceph-mon[96293]: Reconfiguring daemon mgr.y on vm00 2026-03-10T13:27:05.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:05 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:05.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:05 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:05.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:05 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-10T13:27:05.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:05 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-10T13:27:05.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:05 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:27:05.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:05 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:05.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:05 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:05.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:05 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch 2026-03-10T13:27:05.755 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:05 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:27:06.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:05 vm08 ceph-mon[82639]: Detected new or changed devices on vm00 2026-03-10T13:27:06.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:05 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:06.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:05 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:06.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:05 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:27:06.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:05 vm08 ceph-mon[82639]: from='mgr.44106 
192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:27:06.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:05 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:06.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:05 vm08 ceph-mon[82639]: Reconfiguring mon.a (monmap changed)... 2026-03-10T13:27:06.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:05 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-10T13:27:06.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:05 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-10T13:27:06.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:05 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:27:06.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:05 vm08 ceph-mon[82639]: Reconfiguring daemon mon.a on vm00 2026-03-10T13:27:06.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:05 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:06.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:05 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:06.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:05 vm08 ceph-mon[82639]: Reconfiguring mgr.y (monmap changed)... 2026-03-10T13:27:06.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:05 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-10T13:27:06.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:05 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-10T13:27:06.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:05 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-10T13:27:06.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:05 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:27:06.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:05 vm08 ceph-mon[82639]: Reconfiguring daemon mgr.y on vm00 2026-03-10T13:27:06.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:05 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:06.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:05 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:06.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:05 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-10T13:27:06.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:05 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-10T13:27:06.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:05 vm08 
ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:27:06.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:05 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:06.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:05 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:06.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:05 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch 2026-03-10T13:27:06.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:05 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:27:06.935 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:06 vm00 ceph-mon[94470]: pgmap v7: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 26 KiB/s rd, 0 B/s wr, 11 op/s 2026-03-10T13:27:06.935 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:06 vm00 ceph-mon[94470]: Reconfiguring mon.c (monmap changed)... 2026-03-10T13:27:06.935 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:06 vm00 ceph-mon[94470]: Reconfiguring daemon mon.c on vm00 2026-03-10T13:27:06.935 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:06 vm00 ceph-mon[94470]: Reconfiguring osd.0 (monmap changed)... 2026-03-10T13:27:06.936 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:06 vm00 ceph-mon[94470]: Reconfiguring daemon osd.0 on vm00 2026-03-10T13:27:06.936 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:06 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:06.936 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:06 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:06.936 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:06 vm00 ceph-mon[94470]: Reconfiguring osd.1 (monmap changed)... 
2026-03-10T13:27:06.936 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:06 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch 2026-03-10T13:27:06.936 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:06 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:27:06.936 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:06 vm00 ceph-mon[94470]: Reconfiguring daemon osd.1 on vm00 2026-03-10T13:27:06.936 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:06 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:06.936 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:06 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:06.936 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:06 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.2"}]: dispatch 2026-03-10T13:27:06.936 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:06 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:27:06.936 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:06 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:06.936 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:06 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:06.936 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:06 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.3"}]: dispatch 2026-03-10T13:27:06.936 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:06 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:27:06.936 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:06 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:06.936 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:06 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:06.936 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:06 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm00.tvlvzo", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T13:27:06.936 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:06 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm00.tvlvzo", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T13:27:06.936 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:06 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:27:06.936 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:06 vm00 ceph-mon[96293]: pgmap v7: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 26 KiB/s rd, 0 B/s wr, 11 op/s 2026-03-10T13:27:06.936 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:06 vm00 ceph-mon[96293]: Reconfiguring mon.c (monmap changed)... 
2026-03-10T13:27:06.936 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:06 vm00 ceph-mon[96293]: Reconfiguring daemon mon.c on vm00 2026-03-10T13:27:06.936 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:06 vm00 ceph-mon[96293]: Reconfiguring osd.0 (monmap changed)... 2026-03-10T13:27:06.936 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:06 vm00 ceph-mon[96293]: Reconfiguring daemon osd.0 on vm00 2026-03-10T13:27:06.936 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:06 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:06.936 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:06 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:06.936 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:06 vm00 ceph-mon[96293]: Reconfiguring osd.1 (monmap changed)... 2026-03-10T13:27:06.936 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:06 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch 2026-03-10T13:27:06.936 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:06 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:27:06.936 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:06 vm00 ceph-mon[96293]: Reconfiguring daemon osd.1 on vm00 2026-03-10T13:27:06.936 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:06 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:06.936 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:06 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:06.936 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:06 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.2"}]: dispatch 2026-03-10T13:27:06.936 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:06 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:27:06.936 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:06 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:06.936 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:06 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:06.936 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:06 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.3"}]: dispatch 2026-03-10T13:27:06.936 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:06 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:27:06.936 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:06 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:06.936 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:06 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:06.936 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:06 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm00.tvlvzo", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T13:27:06.936 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:06 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 
cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm00.tvlvzo", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T13:27:06.936 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:06 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:27:07.202 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:06 vm08 ceph-mon[82639]: pgmap v7: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 26 KiB/s rd, 0 B/s wr, 11 op/s 2026-03-10T13:27:07.203 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:06 vm08 ceph-mon[82639]: Reconfiguring mon.c (monmap changed)... 2026-03-10T13:27:07.203 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:06 vm08 ceph-mon[82639]: Reconfiguring daemon mon.c on vm00 2026-03-10T13:27:07.203 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:06 vm08 ceph-mon[82639]: Reconfiguring osd.0 (monmap changed)... 2026-03-10T13:27:07.203 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:06 vm08 ceph-mon[82639]: Reconfiguring daemon osd.0 on vm00 2026-03-10T13:27:07.203 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:06 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:07.203 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:06 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:07.203 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:06 vm08 ceph-mon[82639]: Reconfiguring osd.1 (monmap changed)... 2026-03-10T13:27:07.203 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:06 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch 2026-03-10T13:27:07.203 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:06 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:27:07.203 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:06 vm08 ceph-mon[82639]: Reconfiguring daemon osd.1 on vm00 2026-03-10T13:27:07.203 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:06 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:07.203 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:06 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:07.203 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:06 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.2"}]: dispatch 2026-03-10T13:27:07.203 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:06 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:27:07.203 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:06 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:07.203 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:06 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:07.203 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:06 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.3"}]: dispatch 2026-03-10T13:27:07.203 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:06 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config 
generate-minimal-conf"}]: dispatch 2026-03-10T13:27:07.203 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:06 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:07.203 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:06 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:07.203 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:06 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm00.tvlvzo", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T13:27:07.203 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:06 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm00.tvlvzo", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T13:27:07.203 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:06 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:27:07.252 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:27:07 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:27:07.107+0000 7f1445d54640 -1 mgr.server handle_report got status from non-daemon mon.c 2026-03-10T13:27:07.252 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:27:06 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:27:06.988Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:27:07.253 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:27:06 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:27:06.989Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:27:08.012 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:08 vm08 ceph-mon[82639]: Reconfiguring osd.2 (monmap changed)... 2026-03-10T13:27:08.012 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:08 vm08 ceph-mon[82639]: Reconfiguring daemon osd.2 on vm00 2026-03-10T13:27:08.012 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:08 vm08 ceph-mon[82639]: Reconfiguring osd.3 (monmap changed)... 2026-03-10T13:27:08.012 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:08 vm08 ceph-mon[82639]: Reconfiguring daemon osd.3 on vm00 2026-03-10T13:27:08.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:08 vm08 ceph-mon[82639]: Reconfiguring rgw.foo.vm00.tvlvzo (monmap changed)... 
2026-03-10T13:27:08.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:08 vm08 ceph-mon[82639]: Reconfiguring daemon rgw.foo.vm00.tvlvzo on vm00 2026-03-10T13:27:08.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:08 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:08.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:08 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:08.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:08 vm08 ceph-mon[82639]: Reconfiguring mon.b (monmap changed)... 2026-03-10T13:27:08.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:08 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-10T13:27:08.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:08 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-10T13:27:08.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:08 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:27:08.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:08 vm08 ceph-mon[82639]: Reconfiguring daemon mon.b on vm08 2026-03-10T13:27:08.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:08 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:08.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:08 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:08.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:08 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-10T13:27:08.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:08 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-10T13:27:08.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:08 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-10T13:27:08.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:08 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:27:08.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:08 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:08.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:08 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:08.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:08 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.4"}]: dispatch 2026-03-10T13:27:08.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:08 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:27:08.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:08 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:08.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 
13:27:08 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:08.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:08 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.5"}]: dispatch 2026-03-10T13:27:08.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:08 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:27:08.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:08 vm00 ceph-mon[94470]: Reconfiguring osd.2 (monmap changed)... 2026-03-10T13:27:08.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:08 vm00 ceph-mon[94470]: Reconfiguring daemon osd.2 on vm00 2026-03-10T13:27:08.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:08 vm00 ceph-mon[94470]: Reconfiguring osd.3 (monmap changed)... 2026-03-10T13:27:08.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:08 vm00 ceph-mon[94470]: Reconfiguring daemon osd.3 on vm00 2026-03-10T13:27:08.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:08 vm00 ceph-mon[94470]: Reconfiguring rgw.foo.vm00.tvlvzo (monmap changed)... 2026-03-10T13:27:08.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:08 vm00 ceph-mon[94470]: Reconfiguring daemon rgw.foo.vm00.tvlvzo on vm00 2026-03-10T13:27:08.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:08 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:08.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:08 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:08.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:08 vm00 ceph-mon[94470]: Reconfiguring mon.b (monmap changed)... 2026-03-10T13:27:08.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:08 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-10T13:27:08.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:08 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-10T13:27:08.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:08 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:27:08.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:08 vm00 ceph-mon[94470]: Reconfiguring daemon mon.b on vm08 2026-03-10T13:27:08.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:08 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:08.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:08 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:08.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:08 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-10T13:27:08.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:08 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-10T13:27:08.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:08 vm00 ceph-mon[94470]: from='mgr.44106 
192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-10T13:27:08.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:08 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:27:08.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:08 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:08.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:08 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:08.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:08 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.4"}]: dispatch 2026-03-10T13:27:08.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:08 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:27:08.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:08 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:08.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:08 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:08.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:08 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.5"}]: dispatch 2026-03-10T13:27:08.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:08 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:27:08.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:08 vm00 ceph-mon[96293]: Reconfiguring osd.2 (monmap changed)... 2026-03-10T13:27:08.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:08 vm00 ceph-mon[96293]: Reconfiguring daemon osd.2 on vm00 2026-03-10T13:27:08.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:08 vm00 ceph-mon[96293]: Reconfiguring osd.3 (monmap changed)... 2026-03-10T13:27:08.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:08 vm00 ceph-mon[96293]: Reconfiguring daemon osd.3 on vm00 2026-03-10T13:27:08.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:08 vm00 ceph-mon[96293]: Reconfiguring rgw.foo.vm00.tvlvzo (monmap changed)... 2026-03-10T13:27:08.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:08 vm00 ceph-mon[96293]: Reconfiguring daemon rgw.foo.vm00.tvlvzo on vm00 2026-03-10T13:27:08.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:08 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:08.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:08 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:08.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:08 vm00 ceph-mon[96293]: Reconfiguring mon.b (monmap changed)... 
2026-03-10T13:27:08.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:08 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-10T13:27:08.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:08 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-10T13:27:08.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:08 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:27:08.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:08 vm00 ceph-mon[96293]: Reconfiguring daemon mon.b on vm08 2026-03-10T13:27:08.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:08 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:08.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:08 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:08.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:08 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-10T13:27:08.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:08 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-10T13:27:08.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:08 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-10T13:27:08.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:08 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:27:08.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:08 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:08.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:08 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:08.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:08 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.4"}]: dispatch 2026-03-10T13:27:08.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:08 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:27:08.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:08 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:08.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:08 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:08.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:08 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.5"}]: dispatch 2026-03-10T13:27:08.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:08 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:27:09.270 
INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:09 vm08 ceph-mon[82639]: pgmap v8: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 20 KiB/s rd, 0 B/s wr, 8 op/s 2026-03-10T13:27:09.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:09 vm08 ceph-mon[82639]: Reconfiguring mgr.x (monmap changed)... 2026-03-10T13:27:09.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:09 vm08 ceph-mon[82639]: Reconfiguring daemon mgr.x on vm08 2026-03-10T13:27:09.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:09 vm08 ceph-mon[82639]: Reconfiguring osd.4 (monmap changed)... 2026-03-10T13:27:09.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:09 vm08 ceph-mon[82639]: Reconfiguring daemon osd.4 on vm08 2026-03-10T13:27:09.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:09 vm08 ceph-mon[82639]: Reconfiguring osd.5 (monmap changed)... 2026-03-10T13:27:09.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:09 vm08 ceph-mon[82639]: Reconfiguring daemon osd.5 on vm08 2026-03-10T13:27:09.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:09 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:09.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:09 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:09.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:09 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.6"}]: dispatch 2026-03-10T13:27:09.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:09 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:27:09.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:09 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:09.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:09 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:09.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:09 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.7"}]: dispatch 2026-03-10T13:27:09.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:09 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:27:09.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:09 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:09.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:09 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:09.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:09 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm08.ljayps", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T13:27:09.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:09 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm08.ljayps", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T13:27:09.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:09 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config 
generate-minimal-conf"}]: dispatch 2026-03-10T13:27:09.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:09 vm00 ceph-mon[94470]: pgmap v8: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 20 KiB/s rd, 0 B/s wr, 8 op/s 2026-03-10T13:27:09.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:09 vm00 ceph-mon[94470]: Reconfiguring mgr.x (monmap changed)... 2026-03-10T13:27:09.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:09 vm00 ceph-mon[94470]: Reconfiguring daemon mgr.x on vm08 2026-03-10T13:27:09.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:09 vm00 ceph-mon[94470]: Reconfiguring osd.4 (monmap changed)... 2026-03-10T13:27:09.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:09 vm00 ceph-mon[94470]: Reconfiguring daemon osd.4 on vm08 2026-03-10T13:27:09.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:09 vm00 ceph-mon[94470]: Reconfiguring osd.5 (monmap changed)... 2026-03-10T13:27:09.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:09 vm00 ceph-mon[94470]: Reconfiguring daemon osd.5 on vm08 2026-03-10T13:27:09.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:09 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:09.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:09 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:09.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:09 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.6"}]: dispatch 2026-03-10T13:27:09.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:09 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:27:09.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:09 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:09.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:09 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:09.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:09 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.7"}]: dispatch 2026-03-10T13:27:09.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:09 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:27:09.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:09 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:09.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:09 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:09.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:09 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm08.ljayps", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T13:27:09.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:09 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm08.ljayps", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T13:27:09.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:09 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' 
entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:27:09.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:09 vm00 ceph-mon[96293]: pgmap v8: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 20 KiB/s rd, 0 B/s wr, 8 op/s 2026-03-10T13:27:09.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:09 vm00 ceph-mon[96293]: Reconfiguring mgr.x (monmap changed)... 2026-03-10T13:27:09.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:09 vm00 ceph-mon[96293]: Reconfiguring daemon mgr.x on vm08 2026-03-10T13:27:09.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:09 vm00 ceph-mon[96293]: Reconfiguring osd.4 (monmap changed)... 2026-03-10T13:27:09.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:09 vm00 ceph-mon[96293]: Reconfiguring daemon osd.4 on vm08 2026-03-10T13:27:09.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:09 vm00 ceph-mon[96293]: Reconfiguring osd.5 (monmap changed)... 2026-03-10T13:27:09.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:09 vm00 ceph-mon[96293]: Reconfiguring daemon osd.5 on vm08 2026-03-10T13:27:09.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:09 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:09.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:09 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:09.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:09 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.6"}]: dispatch 2026-03-10T13:27:09.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:09 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:27:09.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:09 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:09.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:09 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:09.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:09 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.7"}]: dispatch 2026-03-10T13:27:09.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:09 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:27:09.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:09 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:09.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:09 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:09.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:09 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm08.ljayps", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T13:27:09.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:09 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm08.ljayps", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T13:27:09.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:09 vm00 ceph-mon[96293]: 
from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:27:10.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: Reconfiguring osd.6 (monmap changed)... 2026-03-10T13:27:10.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: Reconfiguring daemon osd.6 on vm08 2026-03-10T13:27:10.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: Reconfiguring osd.7 (monmap changed)... 2026-03-10T13:27:10.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: Reconfiguring daemon osd.7 on vm08 2026-03-10T13:27:10.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: Reconfiguring rgw.foo.vm08.ljayps (monmap changed)... 2026-03-10T13:27:10.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: Reconfiguring daemon rgw.foo.vm08.ljayps on vm08 2026-03-10T13:27:10.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:10.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:10.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:27:10.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:10.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:10.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:10.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-10T13:27:10.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-10T13:27:10.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]': finished 2026-03-10T13:27:10.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-10T13:27:10.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-10T13:27:10.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]': finished 2026-03-10T13:27:10.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": 
"versions"}]: dispatch 2026-03-10T13:27:10.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:10.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:10.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:10.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:10.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:10.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:10.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-10T13:27:10.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-10T13:27:10.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-10T13:27:10.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-10T13:27:10.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:10.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:10.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:10.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:10.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:10.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:10.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:10.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-10T13:27:10.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": 
"config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-10T13:27:10.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-10T13:27:10.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-10T13:27:10.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:10.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-10T13:27:10.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-10T13:27:10.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-10T13:27:10.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-10T13:27:10.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:10.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-10T13:27:10.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-10T13:27:10.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-10T13:27:10.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-10T13:27:10.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:10.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-10T13:27:10.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-10T13:27:10.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: 
from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-10T13:27:10.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-10T13:27:10.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:10.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-10T13:27:10.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-10T13:27:10.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-10T13:27:10.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-10T13:27:10.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:10.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-10T13:27:10.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-10T13:27:10.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-10T13:27:10.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-10T13:27:10.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]: dispatch 2026-03-10T13:27:10.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]: dispatch 2026-03-10T13:27:10.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]': finished 2026-03-10T13:27:10.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:10.504 
INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:10.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon"}]': finished 2026-03-10T13:27:10.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]: dispatch 2026-03-10T13:27:10.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]: dispatch 2026-03-10T13:27:10.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]': finished 2026-03-10T13:27:10.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd"}]: dispatch 2026-03-10T13:27:10.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd"}]: dispatch 2026-03-10T13:27:10.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mds"}]: dispatch 2026-03-10T13:27:10.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mds"}]: dispatch 2026-03-10T13:27:10.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mds"}]': finished 2026-03-10T13:27:10.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]: dispatch 2026-03-10T13:27:10.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]: dispatch 2026-03-10T13:27:10.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]: dispatch 2026-03-10T13:27:10.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]: dispatch 2026-03-10T13:27:10.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]': finished 2026-03-10T13:27:10.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 
192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:10.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:10.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]: dispatch 2026-03-10T13:27:10.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]: dispatch 2026-03-10T13:27:10.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]': finished 2026-03-10T13:27:10.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.iscsi"}]: dispatch 2026-03-10T13:27:10.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.iscsi"}]: dispatch 2026-03-10T13:27:10.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]: dispatch 2026-03-10T13:27:10.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]: dispatch 2026-03-10T13:27:10.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]': finished 2026-03-10T13:27:10.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]: dispatch 2026-03-10T13:27:10.505 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]: dispatch 2026-03-10T13:27:10.505 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]': finished 2026-03-10T13:27:10.505 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:10.505 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:10.505 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' 
entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:10.505 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:10.505 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:10.505 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:10.505 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:10.505 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:10.505 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:10.505 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:10.505 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:10.505 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:10.505 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]: dispatch 2026-03-10T13:27:10.505 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]: dispatch 2026-03-10T13:27:10.505 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]': finished 2026-03-10T13:27:10.505 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:27:10.505 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:27:10.505 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:10.505 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' 
cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:27:10.505 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:27:10.505 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:10.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: Reconfiguring osd.6 (monmap changed)... 2026-03-10T13:27:10.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: Reconfiguring daemon osd.6 on vm08 2026-03-10T13:27:10.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: Reconfiguring osd.7 (monmap changed)... 2026-03-10T13:27:10.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: Reconfiguring daemon osd.7 on vm08 2026-03-10T13:27:10.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: Reconfiguring rgw.foo.vm08.ljayps (monmap changed)... 2026-03-10T13:27:10.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: Reconfiguring daemon rgw.foo.vm08.ljayps on vm08 2026-03-10T13:27:10.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:10.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:10.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:27:10.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:10.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:10.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:10.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-10T13:27:10.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-10T13:27:10.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]': finished 2026-03-10T13:27:10.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-10T13:27:10.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-10T13:27:10.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: 
from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]': finished 2026-03-10T13:27:10.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:10.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:10.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:10.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:10.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:10.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:10.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:10.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-10T13:27:10.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-10T13:27:10.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-10T13:27:10.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-10T13:27:10.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:10.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:10.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:10.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:10.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:10.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:10.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:10.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: 
from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-10T13:27:10.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-10T13:27:10.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-10T13:27:10.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-10T13:27:10.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:10.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-10T13:27:10.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-10T13:27:10.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-10T13:27:10.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-10T13:27:10.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:10.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-10T13:27:10.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-10T13:27:10.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-10T13:27:10.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-10T13:27:10.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:10.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-10T13:27:10.506 
INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-10T13:27:10.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-10T13:27:10.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-10T13:27:10.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:10.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-10T13:27:10.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-10T13:27:10.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-10T13:27:10.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-10T13:27:10.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:10.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-10T13:27:10.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-10T13:27:10.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-10T13:27:10.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-10T13:27:10.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]: dispatch 2026-03-10T13:27:10.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]: dispatch 2026-03-10T13:27:10.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", 
"who": "mgr"}]': finished 2026-03-10T13:27:10.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:10.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:10.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon"}]': finished 2026-03-10T13:27:10.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]: dispatch 2026-03-10T13:27:10.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]: dispatch 2026-03-10T13:27:10.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]': finished 2026-03-10T13:27:10.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd"}]: dispatch 2026-03-10T13:27:10.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd"}]: dispatch 2026-03-10T13:27:10.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mds"}]: dispatch 2026-03-10T13:27:10.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mds"}]: dispatch 2026-03-10T13:27:10.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mds"}]': finished 2026-03-10T13:27:10.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]: dispatch 2026-03-10T13:27:10.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]: dispatch 2026-03-10T13:27:10.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]: dispatch 2026-03-10T13:27:10.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]: dispatch 2026-03-10T13:27:10.506 
INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]': finished 2026-03-10T13:27:10.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:10.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:10.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]: dispatch 2026-03-10T13:27:10.507 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]: dispatch 2026-03-10T13:27:10.507 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]': finished 2026-03-10T13:27:10.507 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.iscsi"}]: dispatch 2026-03-10T13:27:10.507 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.iscsi"}]: dispatch 2026-03-10T13:27:10.507 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]: dispatch 2026-03-10T13:27:10.507 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]: dispatch 2026-03-10T13:27:10.507 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]': finished 2026-03-10T13:27:10.507 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]: dispatch 2026-03-10T13:27:10.507 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]: dispatch 2026-03-10T13:27:10.507 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]': finished 2026-03-10T13:27:10.507 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:10.507 
INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:10.507 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:10.507 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:10.507 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:10.507 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:10.507 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:10.507 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:10.507 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:10.507 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:10.507 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:10.507 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:10.507 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]: dispatch 2026-03-10T13:27:10.507 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]: dispatch 2026-03-10T13:27:10.507 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]': finished 2026-03-10T13:27:10.507 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:27:10.507 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth 
get", "entity": "client.admin"}]: dispatch 2026-03-10T13:27:10.507 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:10.507 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:27:10.507 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:27:10.507 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:10 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:10.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: Reconfiguring osd.6 (monmap changed)... 2026-03-10T13:27:10.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: Reconfiguring daemon osd.6 on vm08 2026-03-10T13:27:10.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: Reconfiguring osd.7 (monmap changed)... 2026-03-10T13:27:10.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: Reconfiguring daemon osd.7 on vm08 2026-03-10T13:27:10.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: Reconfiguring rgw.foo.vm08.ljayps (monmap changed)... 2026-03-10T13:27:10.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: Reconfiguring daemon rgw.foo.vm08.ljayps on vm08 2026-03-10T13:27:10.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:10.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:10.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:27:10.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:10.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:10.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:10.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-10T13:27:10.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-10T13:27:10.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]': finished 2026-03-10T13:27:10.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": 
"mon.c"}]: dispatch 2026-03-10T13:27:10.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-10T13:27:10.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]': finished 2026-03-10T13:27:10.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:10.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:10.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:10.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:10.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:10.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:10.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:10.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-10T13:27:10.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-10T13:27:10.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-10T13:27:10.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-10T13:27:10.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:10.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:10.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:10.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:10.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:10.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 
ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:10.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:10.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-10T13:27:10.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-10T13:27:10.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-10T13:27:10.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-10T13:27:10.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:10.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-10T13:27:10.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-10T13:27:10.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-10T13:27:10.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-10T13:27:10.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:10.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-10T13:27:10.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-10T13:27:10.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-10T13:27:10.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-10T13:27:10.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 
192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:10.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-10T13:27:10.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-10T13:27:10.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-10T13:27:10.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-10T13:27:10.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:10.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-10T13:27:10.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-10T13:27:10.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-10T13:27:10.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-10T13:27:10.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:10.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-10T13:27:10.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-10T13:27:10.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-10T13:27:10.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-10T13:27:10.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]: dispatch 2026-03-10T13:27:10.522 
INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]: dispatch 2026-03-10T13:27:10.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]': finished 2026-03-10T13:27:10.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:10.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:10.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon"}]': finished 2026-03-10T13:27:10.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]: dispatch 2026-03-10T13:27:10.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]: dispatch 2026-03-10T13:27:10.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]': finished 2026-03-10T13:27:10.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd"}]: dispatch 2026-03-10T13:27:10.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd"}]: dispatch 2026-03-10T13:27:10.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mds"}]: dispatch 2026-03-10T13:27:10.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mds"}]: dispatch 2026-03-10T13:27:10.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mds"}]': finished 2026-03-10T13:27:10.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]: dispatch 2026-03-10T13:27:10.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]: dispatch 2026-03-10T13:27:10.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' 
cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]: dispatch 2026-03-10T13:27:10.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]: dispatch 2026-03-10T13:27:10.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]': finished 2026-03-10T13:27:10.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:10.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:10.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]: dispatch 2026-03-10T13:27:10.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]: dispatch 2026-03-10T13:27:10.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]': finished 2026-03-10T13:27:10.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.iscsi"}]: dispatch 2026-03-10T13:27:10.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.iscsi"}]: dispatch 2026-03-10T13:27:10.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]: dispatch 2026-03-10T13:27:10.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]: dispatch 2026-03-10T13:27:10.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]': finished 2026-03-10T13:27:10.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]: dispatch 2026-03-10T13:27:10.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]: dispatch 2026-03-10T13:27:10.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", 
"name": "container_image", "who": "client.nvmeof"}]': finished 2026-03-10T13:27:10.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:10.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:10.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:10.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:10.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:10.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:10.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:10.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:10.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:10.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:10.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:10.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:10.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]: dispatch 2026-03-10T13:27:10.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]: dispatch 2026-03-10T13:27:10.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]': finished 2026-03-10T13:27:10.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 
13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:27:10.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:27:10.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:10.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:27:10.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:27:10.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:10 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:11.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:11 vm00 ceph-mon[94470]: Upgrade: Setting container_image for all mon 2026-03-10T13:27:11.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:11 vm00 ceph-mon[94470]: Upgrade: Setting container_image for all crash 2026-03-10T13:27:11.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:11 vm00 ceph-mon[94470]: Upgrade: Setting container_image for all mds 2026-03-10T13:27:11.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:11 vm00 ceph-mon[94470]: Upgrade: Setting container_image for all rbd-mirror 2026-03-10T13:27:11.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:11 vm00 ceph-mon[94470]: Upgrade: Setting container_image for all cephfs-mirror 2026-03-10T13:27:11.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:11 vm00 ceph-mon[94470]: Upgrade: Setting container_image for all ceph-exporter 2026-03-10T13:27:11.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:11 vm00 ceph-mon[94470]: Upgrade: Setting container_image for all nfs 2026-03-10T13:27:11.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:11 vm00 ceph-mon[94470]: Upgrade: Setting container_image for all nvmeof 2026-03-10T13:27:11.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:11 vm00 ceph-mon[94470]: Upgrade: Setting container_image for all node-exporter 2026-03-10T13:27:11.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:11 vm00 ceph-mon[94470]: Upgrade: Setting container_image for all prometheus 2026-03-10T13:27:11.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:11 vm00 ceph-mon[94470]: Upgrade: Setting container_image for all alertmanager 2026-03-10T13:27:11.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:11 vm00 ceph-mon[94470]: Upgrade: Setting container_image for all grafana 2026-03-10T13:27:11.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:11 vm00 ceph-mon[94470]: Upgrade: Setting container_image for all loki 2026-03-10T13:27:11.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:11 vm00 ceph-mon[94470]: Upgrade: Setting container_image for all promtail 2026-03-10T13:27:11.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:11 vm00 ceph-mon[94470]: Upgrade: Finalizing container_image settings 2026-03-10T13:27:11.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:11 vm00 ceph-mon[94470]: Upgrade: Complete! 
2026-03-10T13:27:11.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:11 vm00 ceph-mon[94470]: pgmap v9: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 17 KiB/s rd, 0 B/s wr, 7 op/s 2026-03-10T13:27:11.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:11 vm00 ceph-mon[96293]: Upgrade: Setting container_image for all mon 2026-03-10T13:27:11.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:11 vm00 ceph-mon[96293]: Upgrade: Setting container_image for all crash 2026-03-10T13:27:11.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:11 vm00 ceph-mon[96293]: Upgrade: Setting container_image for all mds 2026-03-10T13:27:11.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:11 vm00 ceph-mon[96293]: Upgrade: Setting container_image for all rbd-mirror 2026-03-10T13:27:11.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:11 vm00 ceph-mon[96293]: Upgrade: Setting container_image for all cephfs-mirror 2026-03-10T13:27:11.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:11 vm00 ceph-mon[96293]: Upgrade: Setting container_image for all ceph-exporter 2026-03-10T13:27:11.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:11 vm00 ceph-mon[96293]: Upgrade: Setting container_image for all nfs 2026-03-10T13:27:11.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:11 vm00 ceph-mon[96293]: Upgrade: Setting container_image for all nvmeof 2026-03-10T13:27:11.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:11 vm00 ceph-mon[96293]: Upgrade: Setting container_image for all node-exporter 2026-03-10T13:27:11.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:11 vm00 ceph-mon[96293]: Upgrade: Setting container_image for all prometheus 2026-03-10T13:27:11.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:11 vm00 ceph-mon[96293]: Upgrade: Setting container_image for all alertmanager 2026-03-10T13:27:11.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:11 vm00 ceph-mon[96293]: Upgrade: Setting container_image for all grafana 2026-03-10T13:27:11.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:11 vm00 ceph-mon[96293]: Upgrade: Setting container_image for all loki 2026-03-10T13:27:11.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:11 vm00 ceph-mon[96293]: Upgrade: Setting container_image for all promtail 2026-03-10T13:27:11.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:11 vm00 ceph-mon[96293]: Upgrade: Finalizing container_image settings 2026-03-10T13:27:11.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:11 vm00 ceph-mon[96293]: Upgrade: Complete! 
2026-03-10T13:27:11.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:11 vm00 ceph-mon[96293]: pgmap v9: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 17 KiB/s rd, 0 B/s wr, 7 op/s 2026-03-10T13:27:11.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:11 vm08 ceph-mon[82639]: Upgrade: Setting container_image for all mon 2026-03-10T13:27:11.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:11 vm08 ceph-mon[82639]: Upgrade: Setting container_image for all crash 2026-03-10T13:27:11.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:11 vm08 ceph-mon[82639]: Upgrade: Setting container_image for all mds 2026-03-10T13:27:11.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:11 vm08 ceph-mon[82639]: Upgrade: Setting container_image for all rbd-mirror 2026-03-10T13:27:11.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:11 vm08 ceph-mon[82639]: Upgrade: Setting container_image for all cephfs-mirror 2026-03-10T13:27:11.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:11 vm08 ceph-mon[82639]: Upgrade: Setting container_image for all ceph-exporter 2026-03-10T13:27:11.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:11 vm08 ceph-mon[82639]: Upgrade: Setting container_image for all nfs 2026-03-10T13:27:11.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:11 vm08 ceph-mon[82639]: Upgrade: Setting container_image for all nvmeof 2026-03-10T13:27:11.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:11 vm08 ceph-mon[82639]: Upgrade: Setting container_image for all node-exporter 2026-03-10T13:27:11.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:11 vm08 ceph-mon[82639]: Upgrade: Setting container_image for all prometheus 2026-03-10T13:27:11.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:11 vm08 ceph-mon[82639]: Upgrade: Setting container_image for all alertmanager 2026-03-10T13:27:11.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:11 vm08 ceph-mon[82639]: Upgrade: Setting container_image for all grafana 2026-03-10T13:27:11.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:11 vm08 ceph-mon[82639]: Upgrade: Setting container_image for all loki 2026-03-10T13:27:11.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:11 vm08 ceph-mon[82639]: Upgrade: Setting container_image for all promtail 2026-03-10T13:27:11.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:11 vm08 ceph-mon[82639]: Upgrade: Finalizing container_image settings 2026-03-10T13:27:11.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:11 vm08 ceph-mon[82639]: Upgrade: Complete! 
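All three mons have now logged the same finalization sequence: the mgr removes the container_image overrides it had set for individual daemons and service types ("config rm ... container_image"), deletes the mgr/cephadm/upgrade_state config-key, records "Setting container_image for all <type>" for the remaining service types, and ends with "Upgrade: Complete!". A minimal spot-check of that cleanup from an admin shell might look like the sketch below; these commands are illustrative only and are not part of the recorded test run.

    # list whatever container_image settings survived the cleanup; only a
    # cluster-wide default image, if any, is expected to remain
    ceph config dump | grep container_image || true
    # the upgrade bookkeeping key deleted above should no longer exist
    ceph config-key exists mgr/cephadm/upgrade_state || echo "upgrade_state cleared"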
2026-03-10T13:27:11.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:11 vm08 ceph-mon[82639]: pgmap v9: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 17 KiB/s rd, 0 B/s wr, 7 op/s 2026-03-10T13:27:12.455 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:12 vm08 ceph-mon[82639]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:27:12.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:12 vm00 ceph-mon[94470]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:27:12.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:12 vm00 ceph-mon[96293]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:27:13.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:13 vm00 ceph-mon[94470]: pgmap v10: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 0 B/s wr, 6 op/s 2026-03-10T13:27:13.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:13 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:13.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:13 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:27:13.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:13 vm00 ceph-mon[96293]: pgmap v10: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 0 B/s wr, 6 op/s 2026-03-10T13:27:13.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:13 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:13.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:13 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:27:13.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:13 vm08 ceph-mon[82639]: pgmap v10: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 0 B/s wr, 6 op/s 2026-03-10T13:27:13.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:13 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:13.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:13 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:27:15.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:15 vm00 ceph-mon[94470]: pgmap v11: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 0 B/s wr, 6 op/s 2026-03-10T13:27:15.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:15 vm00 ceph-mon[96293]: pgmap v11: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 0 B/s wr, 6 op/s 2026-03-10T13:27:15.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:15 vm08 ceph-mon[82639]: pgmap v11: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 0 B/s wr, 6 op/s 2026-03-10T13:27:16.002 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:27:15 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:27:15] "GET /metrics HTTP/1.1" 200 37588 "" "Prometheus/2.51.0" 
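The version assertions issued a little later in this log ("ceph versions | jq -e '.mon | length == 1'" and the grep for $sha1) lean on the shape of the "ceph versions" output: a JSON object keyed by daemon type, where each value maps a full version banner to the number of daemons reporting it. A sketch of that structure, using the versions and daemon counts visible in the "ceph orch ps" listing further down (layout is illustrative, not captured from this run):

    ceph versions
    {
        "mon": { "ceph version 19.2.3-678-ge911bdeb (...)": 3 },
        "osd": { "ceph version 17.2.0 (...)": 8 },
        "overall": { ... }
    }
    # a single key under .mon means every mon reports the same build
    ceph versions | jq -e '.mon | length == 1'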
2026-03-10T13:27:16.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:16 vm00 ceph-mon[94470]: pgmap v12: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 16 KiB/s rd, 0 B/s wr, 7 op/s 2026-03-10T13:27:16.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:16 vm00 ceph-mon[96293]: pgmap v12: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 16 KiB/s rd, 0 B/s wr, 7 op/s 2026-03-10T13:27:16.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:16 vm08 ceph-mon[82639]: pgmap v12: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 16 KiB/s rd, 0 B/s wr, 7 op/s 2026-03-10T13:27:17.252 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:27:16 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:27:16.988Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:27:17.252 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:27:16 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:27:16.989Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:27:18.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:18 vm00 ceph-mon[94470]: pgmap v13: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:27:18.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:18 vm00 ceph-mon[96293]: pgmap v13: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:27:18.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:18 vm08 ceph-mon[82639]: pgmap v13: 161 pgs: 161 active+clean; 457 KiB data, 102 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:27:20.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:20 vm00 ceph-mon[94470]: pgmap v14: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 73 KiB/s rd, 0 B/s wr, 120 op/s 2026-03-10T13:27:20.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:20 vm00 ceph-mon[96293]: pgmap v14: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 73 KiB/s rd, 0 B/s wr, 120 op/s 2026-03-10T13:27:20.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:20 vm08 ceph-mon[82639]: pgmap v14: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 73 KiB/s rd, 0 B/s wr, 120 op/s 2026-03-10T13:27:21.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:21 vm00 ceph-mon[94470]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:27:21.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:21 vm00 ceph-mon[96293]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:27:21.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:21 vm08 ceph-mon[82639]: from='client.15249 -' 
entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:27:22.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:22 vm00 ceph-mon[96293]: pgmap v15: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 72 KiB/s rd, 0 B/s wr, 119 op/s 2026-03-10T13:27:22.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:22 vm00 ceph-mon[94470]: pgmap v15: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 72 KiB/s rd, 0 B/s wr, 119 op/s 2026-03-10T13:27:22.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:22 vm08 ceph-mon[82639]: pgmap v15: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 72 KiB/s rd, 0 B/s wr, 119 op/s 2026-03-10T13:27:23.002 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch ps' 2026-03-10T13:27:23.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:23 vm00 ceph-mon[94470]: from='client.44130 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:27:23.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:23 vm00 ceph-mon[96293]: from='client.44130 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:27:23.504 INFO:teuthology.orchestra.run.vm00.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T13:27:23.504 INFO:teuthology.orchestra.run.vm00.stdout:alertmanager.a vm00 *:9093,9094 running (13m) 20s ago 20m 26.4M - 0.25.0 c8568f914cd2 12fde3cf83cb 2026-03-10T13:27:23.505 INFO:teuthology.orchestra.run.vm00.stdout:grafana.a vm08 *:3000 running (85s) 24s ago 20m 74.2M - 10.4.0 c8b91775d855 960e32589e98 2026-03-10T13:27:23.505 INFO:teuthology.orchestra.run.vm00.stdout:iscsi.foo.vm00.dezodo vm00 running (92s) 20s ago 20m 49.0M - 3.5 e1d6a67b021e 630bf6d4e7f3 2026-03-10T13:27:23.505 INFO:teuthology.orchestra.run.vm00.stdout:mgr.x vm08 *:8443,9283,8765 running (97s) 24s ago 22m 486M - 19.2.3-678-ge911bdeb 654f31e6858e 31b91eebc856 2026-03-10T13:27:23.505 INFO:teuthology.orchestra.run.vm00.stdout:mgr.y vm00 *:8443,9283,8765 running (8m) 20s ago 22m 541M - 19.2.3-678-ge911bdeb 654f31e6858e 5bc576d4d32b 2026-03-10T13:27:23.505 INFO:teuthology.orchestra.run.vm00.stdout:mon.a vm00 running (30s) 20s ago 22m 41.7M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 981df6371890 2026-03-10T13:27:23.505 INFO:teuthology.orchestra.run.vm00.stdout:mon.b vm08 running (65s) 24s ago 22m 37.6M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 8cceb678a9ee 2026-03-10T13:27:23.505 INFO:teuthology.orchestra.run.vm00.stdout:mon.c vm00 running (21s) 20s ago 22m 21.0M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 43deda66dee3 2026-03-10T13:27:23.505 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.a vm00 *:9100 running (13m) 20s ago 20m 10.6M - 1.7.0 72c9c2088986 bcf883401619 2026-03-10T13:27:23.505 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.b vm08 *:9100 running (13m) 24s ago 20m 10.0M - 1.7.0 72c9c2088986 4ac83f03f818 2026-03-10T13:27:23.505 INFO:teuthology.orchestra.run.vm00.stdout:osd.0 vm00 running (21m) 20s ago 21m 53.2M 4096M 17.2.0 e1d6a67b021e 2919c7073fa7 2026-03-10T13:27:23.505 INFO:teuthology.orchestra.run.vm00.stdout:osd.1 vm00 running (21m) 
20s ago 21m 57.5M 4096M 17.2.0 e1d6a67b021e 647927dc41ea 2026-03-10T13:27:23.505 INFO:teuthology.orchestra.run.vm00.stdout:osd.2 vm00 running (21m) 20s ago 21m 55.7M 4096M 17.2.0 e1d6a67b021e 1e417e82c2b9 2026-03-10T13:27:23.505 INFO:teuthology.orchestra.run.vm00.stdout:osd.3 vm00 running (21m) 20s ago 21m 53.4M 4096M 17.2.0 e1d6a67b021e 98bb6d7ea69f 2026-03-10T13:27:23.505 INFO:teuthology.orchestra.run.vm00.stdout:osd.4 vm08 running (21m) 24s ago 21m 54.7M 4096M 17.2.0 e1d6a67b021e e349440ca776 2026-03-10T13:27:23.505 INFO:teuthology.orchestra.run.vm00.stdout:osd.5 vm08 running (21m) 24s ago 21m 56.5M 4096M 17.2.0 e1d6a67b021e 9a6a2a67cc99 2026-03-10T13:27:23.505 INFO:teuthology.orchestra.run.vm00.stdout:osd.6 vm08 running (21m) 24s ago 21m 52.2M 4096M 17.2.0 e1d6a67b021e c27676916d52 2026-03-10T13:27:23.505 INFO:teuthology.orchestra.run.vm00.stdout:osd.7 vm08 running (20m) 24s ago 20m 54.0M 4096M 17.2.0 e1d6a67b021e bf67951990a5 2026-03-10T13:27:23.505 INFO:teuthology.orchestra.run.vm00.stdout:prometheus.a vm08 *:9095 running (98s) 24s ago 20m 48.2M - 2.51.0 1d3b7f56885b 3f9b2d0821c9 2026-03-10T13:27:23.505 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm00.tvlvzo vm00 *:8000 running (20m) 20s ago 20m 97.4M - 17.2.0 e1d6a67b021e be24eac16807 2026-03-10T13:27:23.505 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm08.ljayps vm08 *:8000 running (20m) 24s ago 20m 94.3M - 17.2.0 e1d6a67b021e 67b525427823 2026-03-10T13:27:23.575 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph versions | jq -e '"'"'.mon | length == 1'"'"'' 2026-03-10T13:27:23.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:23 vm08 ceph-mon[82639]: from='client.44130 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:27:24.054 INFO:teuthology.orchestra.run.vm00.stdout:true 2026-03-10T13:27:24.105 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph versions | jq -e '"'"'.mon | keys'"'"' | grep $sha1' 2026-03-10T13:27:24.594 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:24 vm00 ceph-mon[94470]: pgmap v16: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 72 KiB/s rd, 0 B/s wr, 119 op/s 2026-03-10T13:27:24.594 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:24 vm00 ceph-mon[94470]: from='client.34190 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:27:24.594 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:24 vm00 ceph-mon[94470]: from='client.? 
192.168.123.100:0/181372594' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:24.594 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:24 vm00 ceph-mon[96293]: pgmap v16: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 72 KiB/s rd, 0 B/s wr, 119 op/s 2026-03-10T13:27:24.594 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:24 vm00 ceph-mon[96293]: from='client.34190 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:27:24.594 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:24 vm00 ceph-mon[96293]: from='client.? 192.168.123.100:0/181372594' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:24.595 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)" 2026-03-10T13:27:24.642 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch upgrade check quay.ceph.io/ceph-ci/ceph:$sha1 | jq -e '"'"'.up_to_date | length == 5'"'"'' 2026-03-10T13:27:24.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:24 vm08 ceph-mon[82639]: pgmap v16: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 72 KiB/s rd, 0 B/s wr, 119 op/s 2026-03-10T13:27:24.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:24 vm08 ceph-mon[82639]: from='client.34190 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:27:24.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:24 vm08 ceph-mon[82639]: from='client.? 192.168.123.100:0/181372594' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:25.363 INFO:teuthology.orchestra.run.vm00.stdout:true 2026-03-10T13:27:25.420 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch upgrade status' 2026-03-10T13:27:25.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:25 vm00 ceph-mon[94470]: from='client.? 192.168.123.100:0/2081711569' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:25.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:25 vm00 ceph-mon[96293]: from='client.? 192.168.123.100:0/2081711569' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:25.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:25 vm08 ceph-mon[82639]: from='client.? 
192.168.123.100:0/2081711569' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:25.861 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:27:25 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:27:25] "GET /metrics HTTP/1.1" 200 37588 "" "Prometheus/2.51.0" 2026-03-10T13:27:25.863 INFO:teuthology.orchestra.run.vm00.stdout:{ 2026-03-10T13:27:25.863 INFO:teuthology.orchestra.run.vm00.stdout: "target_image": null, 2026-03-10T13:27:25.863 INFO:teuthology.orchestra.run.vm00.stdout: "in_progress": false, 2026-03-10T13:27:25.863 INFO:teuthology.orchestra.run.vm00.stdout: "which": "", 2026-03-10T13:27:25.863 INFO:teuthology.orchestra.run.vm00.stdout: "services_complete": [], 2026-03-10T13:27:25.863 INFO:teuthology.orchestra.run.vm00.stdout: "progress": null, 2026-03-10T13:27:25.863 INFO:teuthology.orchestra.run.vm00.stdout: "message": "", 2026-03-10T13:27:25.863 INFO:teuthology.orchestra.run.vm00.stdout: "is_paused": false 2026-03-10T13:27:25.863 INFO:teuthology.orchestra.run.vm00.stdout:} 2026-03-10T13:27:25.903 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph health detail' 2026-03-10T13:27:26.398 INFO:teuthology.orchestra.run.vm00.stdout:HEALTH_OK 2026-03-10T13:27:26.467 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types osd --limit 2' 2026-03-10T13:27:26.659 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:26 vm00 ceph-mon[94470]: from='client.34199 -' entity='client.admin' cmd=[{"prefix": "orch upgrade check", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:27:26.659 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:26 vm00 ceph-mon[94470]: pgmap v17: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 73 KiB/s rd, 0 B/s wr, 120 op/s 2026-03-10T13:27:26.659 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:26 vm00 ceph-mon[94470]: from='client.54118 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:27:26.659 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:26 vm00 ceph-mon[94470]: from='client.? 
192.168.123.100:0/3166364725' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T13:27:26.660 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:26 vm00 ceph-mon[96293]: from='client.34199 -' entity='client.admin' cmd=[{"prefix": "orch upgrade check", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:27:26.660 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:26 vm00 ceph-mon[96293]: pgmap v17: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 73 KiB/s rd, 0 B/s wr, 120 op/s 2026-03-10T13:27:26.660 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:26 vm00 ceph-mon[96293]: from='client.54118 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:27:26.660 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:26 vm00 ceph-mon[96293]: from='client.? 192.168.123.100:0/3166364725' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T13:27:26.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:26 vm08 ceph-mon[82639]: from='client.34199 -' entity='client.admin' cmd=[{"prefix": "orch upgrade check", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:27:26.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:26 vm08 ceph-mon[82639]: pgmap v17: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 73 KiB/s rd, 0 B/s wr, 120 op/s 2026-03-10T13:27:26.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:26 vm08 ceph-mon[82639]: from='client.54118 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:27:26.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:26 vm08 ceph-mon[82639]: from='client.? 
192.168.123.100:0/3166364725' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T13:27:27.236 INFO:teuthology.orchestra.run.vm00.stdout:Initiating upgrade to quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:27:27.253 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:27:26 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:27:26.989Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:27:27.253 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:27:26 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:27:26.991Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:27:27.320 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'while ceph orch upgrade status | jq '"'"'.in_progress'"'"' | grep true && ! ceph orch upgrade status | jq '"'"'.message'"'"' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done' 2026-03-10T13:27:27.411 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:27 vm00 ceph-mon[94470]: from='client.44163 -' entity='client.admin' cmd=[{"prefix": "orch upgrade start", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "daemon_types": "osd", "limit": 2, "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:27:27.411 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:27 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:27.411 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:27 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:27:27.411 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:27 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:27:27.411 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:27 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:27.411 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:27 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:27:27.411 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:27 vm00 ceph-mon[96293]: from='client.44163 -' entity='client.admin' cmd=[{"prefix": "orch upgrade start", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "daemon_types": "osd", "limit": 2, "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:27:27.411 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:27 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 
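The command dispatched above starts a staggered upgrade that may touch only two daemons of type osd, and the harness then blocks in a polling loop until the orchestrator reports completion or an error. A slightly tidied, multi-line rendering of the quoted one-liner (jq -e exits non-zero once in_progress turns false):

    # poll the orchestrator until the limited osd upgrade finishes or errors out
    while ceph orch upgrade status | jq -e '.in_progress' >/dev/null \
          && ! ceph orch upgrade status | jq -r '.message' | grep -q Error; do
        ceph orch ps             # watch per-daemon versions flip to the target build
        ceph versions
        ceph orch upgrade status
        sleep 30
    done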
2026-03-10T13:27:27.411 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:27 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:27:27.411 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:27 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:27:27.411 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:27 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:27.411 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:27 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:27:27.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:27 vm08 ceph-mon[82639]: from='client.44163 -' entity='client.admin' cmd=[{"prefix": "orch upgrade start", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "daemon_types": "osd", "limit": 2, "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:27:27.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:27 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:27.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:27 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:27:27.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:27 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:27:27.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:27 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:27.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:27 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:27:27.897 INFO:teuthology.orchestra.run.vm00.stdout:true 2026-03-10T13:27:28.346 INFO:teuthology.orchestra.run.vm00.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T13:27:28.346 INFO:teuthology.orchestra.run.vm00.stdout:alertmanager.a vm00 *:9093,9094 running (13m) 24s ago 20m 26.4M - 0.25.0 c8568f914cd2 12fde3cf83cb 2026-03-10T13:27:28.346 INFO:teuthology.orchestra.run.vm00.stdout:grafana.a vm08 *:3000 running (90s) 29s ago 20m 74.2M - 10.4.0 c8b91775d855 960e32589e98 2026-03-10T13:27:28.346 INFO:teuthology.orchestra.run.vm00.stdout:iscsi.foo.vm00.dezodo vm00 running (97s) 24s ago 20m 49.0M - 3.5 e1d6a67b021e 630bf6d4e7f3 2026-03-10T13:27:28.346 INFO:teuthology.orchestra.run.vm00.stdout:mgr.x vm08 *:8443,9283,8765 running (102s) 29s ago 22m 486M - 19.2.3-678-ge911bdeb 654f31e6858e 31b91eebc856 2026-03-10T13:27:28.346 INFO:teuthology.orchestra.run.vm00.stdout:mgr.y vm00 *:8443,9283,8765 running (8m) 24s ago 23m 541M - 19.2.3-678-ge911bdeb 654f31e6858e 5bc576d4d32b 2026-03-10T13:27:28.346 INFO:teuthology.orchestra.run.vm00.stdout:mon.a vm00 running (35s) 24s ago 23m 41.7M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 981df6371890 2026-03-10T13:27:28.346 INFO:teuthology.orchestra.run.vm00.stdout:mon.b vm08 running (70s) 29s ago 22m 37.6M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 8cceb678a9ee 2026-03-10T13:27:28.346 INFO:teuthology.orchestra.run.vm00.stdout:mon.c vm00 running (26s) 24s ago 
22m 21.0M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 43deda66dee3 2026-03-10T13:27:28.346 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.a vm00 *:9100 running (13m) 24s ago 20m 10.6M - 1.7.0 72c9c2088986 bcf883401619 2026-03-10T13:27:28.346 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.b vm08 *:9100 running (13m) 29s ago 20m 10.0M - 1.7.0 72c9c2088986 4ac83f03f818 2026-03-10T13:27:28.346 INFO:teuthology.orchestra.run.vm00.stdout:osd.0 vm00 running (22m) 24s ago 22m 53.2M 4096M 17.2.0 e1d6a67b021e 2919c7073fa7 2026-03-10T13:27:28.346 INFO:teuthology.orchestra.run.vm00.stdout:osd.1 vm00 running (21m) 24s ago 21m 57.5M 4096M 17.2.0 e1d6a67b021e 647927dc41ea 2026-03-10T13:27:28.346 INFO:teuthology.orchestra.run.vm00.stdout:osd.2 vm00 running (21m) 24s ago 21m 55.7M 4096M 17.2.0 e1d6a67b021e 1e417e82c2b9 2026-03-10T13:27:28.346 INFO:teuthology.orchestra.run.vm00.stdout:osd.3 vm00 running (21m) 24s ago 21m 53.4M 4096M 17.2.0 e1d6a67b021e 98bb6d7ea69f 2026-03-10T13:27:28.346 INFO:teuthology.orchestra.run.vm00.stdout:osd.4 vm08 running (21m) 29s ago 21m 54.7M 4096M 17.2.0 e1d6a67b021e e349440ca776 2026-03-10T13:27:28.347 INFO:teuthology.orchestra.run.vm00.stdout:osd.5 vm08 running (21m) 29s ago 21m 56.5M 4096M 17.2.0 e1d6a67b021e 9a6a2a67cc99 2026-03-10T13:27:28.347 INFO:teuthology.orchestra.run.vm00.stdout:osd.6 vm08 running (21m) 29s ago 21m 52.2M 4096M 17.2.0 e1d6a67b021e c27676916d52 2026-03-10T13:27:28.347 INFO:teuthology.orchestra.run.vm00.stdout:osd.7 vm08 running (20m) 29s ago 20m 54.0M 4096M 17.2.0 e1d6a67b021e bf67951990a5 2026-03-10T13:27:28.347 INFO:teuthology.orchestra.run.vm00.stdout:prometheus.a vm08 *:9095 running (103s) 29s ago 20m 48.2M - 2.51.0 1d3b7f56885b 3f9b2d0821c9 2026-03-10T13:27:28.347 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm00.tvlvzo vm00 *:8000 running (20m) 24s ago 20m 97.4M - 17.2.0 e1d6a67b021e be24eac16807 2026-03-10T13:27:28.347 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm08.ljayps vm08 *:8000 running (20m) 29s ago 20m 94.3M - 17.2.0 e1d6a67b021e 67b525427823 2026-03-10T13:27:28.628 INFO:teuthology.orchestra.run.vm00.stdout:{ 2026-03-10T13:27:28.628 INFO:teuthology.orchestra.run.vm00.stdout: "mon": { 2026-03-10T13:27:28.628 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 3 2026-03-10T13:27:28.628 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T13:27:28.628 INFO:teuthology.orchestra.run.vm00.stdout: "mgr": { 2026-03-10T13:27:28.628 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-10T13:27:28.628 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T13:27:28.628 INFO:teuthology.orchestra.run.vm00.stdout: "osd": { 2026-03-10T13:27:28.628 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 8 2026-03-10T13:27:28.628 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T13:27:28.628 INFO:teuthology.orchestra.run.vm00.stdout: "rgw": { 2026-03-10T13:27:28.628 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 2 2026-03-10T13:27:28.628 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T13:27:28.628 INFO:teuthology.orchestra.run.vm00.stdout: "overall": { 2026-03-10T13:27:28.628 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) 
quincy (stable)": 10, 2026-03-10T13:27:28.628 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 5 2026-03-10T13:27:28.628 INFO:teuthology.orchestra.run.vm00.stdout: } 2026-03-10T13:27:28.628 INFO:teuthology.orchestra.run.vm00.stdout:} 2026-03-10T13:27:28.876 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:28 vm00 ceph-mon[94470]: Upgrade: Started with target quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:27:28.877 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:28 vm00 ceph-mon[94470]: pgmap v18: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 72 KiB/s rd, 0 B/s wr, 119 op/s 2026-03-10T13:27:28.877 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:28 vm00 ceph-mon[94470]: Upgrade: First pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:27:28.877 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:28 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:28.877 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:28 vm00 ceph-mon[94470]: Upgrade: Target is version 19.2.3-678-ge911bdeb (squid) 2026-03-10T13:27:28.877 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:28 vm00 ceph-mon[94470]: Upgrade: Target container is quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, digests ['quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc'] 2026-03-10T13:27:28.877 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:28 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:27:28.877 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:28 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:28.877 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:28 vm00 ceph-mon[94470]: Upgrade: Setting container_image for all mgr 2026-03-10T13:27:28.877 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:28 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:28.877 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:28 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:28.877 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:28 vm00 ceph-mon[94470]: Upgrade: Setting container_image for all mon 2026-03-10T13:27:28.877 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:28 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:28.877 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:28 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:28.877 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:28 vm00 ceph-mon[94470]: Upgrade: Setting container_image for all crash 2026-03-10T13:27:28.877 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:28 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:28.877 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:28 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 16}]: dispatch 2026-03-10T13:27:28.877 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:28 vm00 ceph-mon[94470]: from='mon.2 -' 
entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 16}]: dispatch 2026-03-10T13:27:28.877 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:28 vm00 ceph-mon[94470]: Upgrade: osd.0 is safe to restart 2026-03-10T13:27:28.877 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:28 vm00 ceph-mon[94470]: from='client.44169 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:27:28.877 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:28 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:28.877 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:28 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch 2026-03-10T13:27:28.877 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:28 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:27:28.877 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:28 vm00 ceph-mon[94470]: from='client.? 192.168.123.100:0/3069403426' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:28.877 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:28 vm00 ceph-mon[96293]: Upgrade: Started with target quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:27:28.877 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:28 vm00 ceph-mon[96293]: pgmap v18: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 72 KiB/s rd, 0 B/s wr, 119 op/s 2026-03-10T13:27:28.877 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:28 vm00 ceph-mon[96293]: Upgrade: First pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:27:28.877 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:28 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:28.877 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:28 vm00 ceph-mon[96293]: Upgrade: Target is version 19.2.3-678-ge911bdeb (squid) 2026-03-10T13:27:28.877 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:28 vm00 ceph-mon[96293]: Upgrade: Target container is quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, digests ['quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc'] 2026-03-10T13:27:28.877 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:28 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:27:28.877 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:28 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:28.877 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:28 vm00 ceph-mon[96293]: Upgrade: Setting container_image for all mgr 2026-03-10T13:27:28.877 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:28 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:28.877 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:28 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:28.877 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:28 vm00 ceph-mon[96293]: Upgrade: Setting container_image for all mon 2026-03-10T13:27:28.877 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 
13:27:28 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:28.877 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:28 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:28.877 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:28 vm00 ceph-mon[96293]: Upgrade: Setting container_image for all crash 2026-03-10T13:27:28.877 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:28 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:28.877 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:28 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 16}]: dispatch 2026-03-10T13:27:28.877 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:28 vm00 ceph-mon[96293]: from='mon.2 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 16}]: dispatch 2026-03-10T13:27:28.877 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:28 vm00 ceph-mon[96293]: Upgrade: osd.0 is safe to restart 2026-03-10T13:27:28.877 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:28 vm00 ceph-mon[96293]: from='client.44169 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:27:28.877 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:28 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:28.877 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:28 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch 2026-03-10T13:27:28.877 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:28 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:27:28.877 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:28 vm00 ceph-mon[96293]: from='client.? 192.168.123.100:0/3069403426' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:28.877 INFO:teuthology.orchestra.run.vm00.stdout:{ 2026-03-10T13:27:28.877 INFO:teuthology.orchestra.run.vm00.stdout: "target_image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", 2026-03-10T13:27:28.877 INFO:teuthology.orchestra.run.vm00.stdout: "in_progress": true, 2026-03-10T13:27:28.878 INFO:teuthology.orchestra.run.vm00.stdout: "which": "Upgrading daemons of type(s) osd. 
Upgrade limited to 2 daemons (2 remaining).", 2026-03-10T13:27:28.878 INFO:teuthology.orchestra.run.vm00.stdout: "services_complete": [], 2026-03-10T13:27:28.878 INFO:teuthology.orchestra.run.vm00.stdout: "progress": "0/8 daemons upgraded", 2026-03-10T13:27:28.878 INFO:teuthology.orchestra.run.vm00.stdout: "message": "Currently upgrading osd daemons", 2026-03-10T13:27:28.878 INFO:teuthology.orchestra.run.vm00.stdout: "is_paused": false 2026-03-10T13:27:28.878 INFO:teuthology.orchestra.run.vm00.stdout:} 2026-03-10T13:27:29.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:28 vm08 ceph-mon[82639]: Upgrade: Started with target quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:27:29.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:28 vm08 ceph-mon[82639]: pgmap v18: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 72 KiB/s rd, 0 B/s wr, 119 op/s 2026-03-10T13:27:29.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:28 vm08 ceph-mon[82639]: Upgrade: First pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:27:29.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:28 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:29.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:28 vm08 ceph-mon[82639]: Upgrade: Target is version 19.2.3-678-ge911bdeb (squid) 2026-03-10T13:27:29.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:28 vm08 ceph-mon[82639]: Upgrade: Target container is quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, digests ['quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc'] 2026-03-10T13:27:29.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:28 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:27:29.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:28 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:29.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:28 vm08 ceph-mon[82639]: Upgrade: Setting container_image for all mgr 2026-03-10T13:27:29.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:28 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:29.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:28 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:29.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:28 vm08 ceph-mon[82639]: Upgrade: Setting container_image for all mon 2026-03-10T13:27:29.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:28 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:29.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:28 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:29.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:28 vm08 ceph-mon[82639]: Upgrade: Setting container_image for all crash 2026-03-10T13:27:29.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:28 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:29.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:28 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 16}]: dispatch 2026-03-10T13:27:29.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:28 vm08 ceph-mon[82639]: from='mon.2 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 16}]: dispatch 2026-03-10T13:27:29.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:28 vm08 ceph-mon[82639]: Upgrade: osd.0 is safe to restart 2026-03-10T13:27:29.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:28 vm08 ceph-mon[82639]: from='client.44169 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:27:29.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:28 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:29.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:28 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch 2026-03-10T13:27:29.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:28 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:27:29.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:28 vm08 ceph-mon[82639]: from='client.? 192.168.123.100:0/3069403426' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:29.253 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 13:27:28 vm00 systemd[1]: Stopping Ceph osd.0 for 98a3dada-1c81-11f1-89c9-d57c120f78d5... 2026-03-10T13:27:29.253 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 13:27:29 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-0[54704]: 2026-03-10T13:27:29.051+0000 7fdae3992700 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-10T13:27:29.253 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 13:27:29 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-0[54704]: 2026-03-10T13:27:29.051+0000 7fdae3992700 -1 osd.0 95 *** Got signal Terminated *** 2026-03-10T13:27:29.253 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 13:27:29 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-0[54704]: 2026-03-10T13:27:29.051+0000 7fdae3992700 -1 osd.0 95 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-10T13:27:30.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:29 vm08 ceph-mon[82639]: from='client.44175 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:27:30.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:29 vm08 ceph-mon[82639]: Upgrade: Updating osd.0 2026-03-10T13:27:30.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:29 vm08 ceph-mon[82639]: Deploying daemon osd.0 on vm00 2026-03-10T13:27:30.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:29 vm08 ceph-mon[82639]: from='client.34220 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:27:30.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:29 vm08 ceph-mon[82639]: from='client.44184 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:27:30.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:29 vm08 ceph-mon[82639]: osd.0 marked itself down and dead 2026-03-10T13:27:30.058 
INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:29 vm00 ceph-mon[94470]: from='client.44175 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:27:30.058 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:29 vm00 ceph-mon[94470]: Upgrade: Updating osd.0 2026-03-10T13:27:30.058 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:29 vm00 ceph-mon[94470]: Deploying daemon osd.0 on vm00 2026-03-10T13:27:30.058 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:29 vm00 ceph-mon[94470]: from='client.34220 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:27:30.058 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:29 vm00 ceph-mon[94470]: from='client.44184 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:27:30.058 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:29 vm00 ceph-mon[94470]: osd.0 marked itself down and dead 2026-03-10T13:27:30.058 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:29 vm00 ceph-mon[96293]: from='client.44175 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:27:30.058 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:29 vm00 ceph-mon[96293]: Upgrade: Updating osd.0 2026-03-10T13:27:30.058 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:29 vm00 ceph-mon[96293]: Deploying daemon osd.0 on vm00 2026-03-10T13:27:30.058 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:29 vm00 ceph-mon[96293]: from='client.34220 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:27:30.058 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:29 vm00 ceph-mon[96293]: from='client.44184 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:27:30.058 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:29 vm00 ceph-mon[96293]: osd.0 marked itself down and dead 2026-03-10T13:27:30.058 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 13:27:29 vm00 podman[101651]: 2026-03-10 13:27:29.806017025 +0000 UTC m=+0.766862530 container died 2919c7073fa7f6fece4a40966c71a3de08dcb20dcfe7894090dcc534187e1691 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-0, version=8, CEPH_POINT_RELEASE=-17.2.0, RELEASE=HEAD, distribution-scope=public, GIT_REPO=https://github.com/ceph/ceph-container.git, vendor=Red Hat, Inc., com.redhat.license_terms=https://centos.org/legal/licensing-policy/, io.openshift.expose-services=, name=centos-stream, build-date=2022-05-03T08:36:31.336870, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, io.openshift.tags=base centos centos-stream, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, architecture=x86_64, maintainer=Guillaume Abrioux , vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, vcs-type=git, GIT_CLEAN=True, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, ceph=True, io.k8s.display-name=CentOS Stream 8, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. 
This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., release=754, io.buildah.version=1.19.8, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, GIT_BRANCH=HEAD, com.redhat.component=centos-stream-container, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image) 2026-03-10T13:27:30.059 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 13:27:29 vm00 podman[101651]: 2026-03-10 13:27:29.8395995 +0000 UTC m=+0.800445005 container remove 2919c7073fa7f6fece4a40966c71a3de08dcb20dcfe7894090dcc534187e1691 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-0, io.openshift.tags=base centos centos-stream, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, distribution-scope=public, name=centos-stream, vcs-type=git, vendor=Red Hat, Inc., version=8, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, io.k8s.display-name=CentOS Stream 8, io.openshift.expose-services=, architecture=x86_64, com.redhat.component=centos-stream-container, release=754, CEPH_POINT_RELEASE=-17.2.0, build-date=2022-05-03T08:36:31.336870, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, GIT_BRANCH=HEAD, RELEASE=HEAD, GIT_REPO=https://github.com/ceph/ceph-container.git, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, maintainer=Guillaume Abrioux , summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., GIT_CLEAN=True, ceph=True, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, io.buildah.version=1.19.8) 2026-03-10T13:27:30.059 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 13:27:29 vm00 bash[101651]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-0 2026-03-10T13:27:30.059 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 13:27:29 vm00 podman[101720]: 2026-03-10 13:27:29.965222296 +0000 UTC m=+0.015831177 container create 1940208f7c827ff6b0a162d1e52edca395f203c335c05ace087f17798cbbe23f (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-0-deactivate, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/) 2026-03-10T13:27:30.059 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 13:27:29 vm00 podman[101720]: 2026-03-10 13:27:29.999935187 +0000 UTC m=+0.050544068 container init 1940208f7c827ff6b0a162d1e52edca395f203c335c05ace087f17798cbbe23f (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-0-deactivate, io.buildah.version=1.41.3, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.build-date=20260223, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , ceph=True) 2026-03-10T13:27:30.059 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 13:27:30 vm00 podman[101720]: 2026-03-10 13:27:30.002926347 +0000 UTC m=+0.053535228 container start 1940208f7c827ff6b0a162d1e52edca395f203c335c05ace087f17798cbbe23f (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-0-deactivate, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, io.buildah.version=1.41.3, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-10T13:27:30.059 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 13:27:30 vm00 podman[101720]: 2026-03-10 13:27:30.003704213 +0000 UTC m=+0.054313095 container attach 
1940208f7c827ff6b0a162d1e52edca395f203c335c05ace087f17798cbbe23f (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-0-deactivate, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2) 2026-03-10T13:27:30.059 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 13:27:30 vm00 podman[101720]: 2026-03-10 13:27:29.958420773 +0000 UTC m=+0.009029654 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:27:30.335 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 13:27:30 vm00 podman[101720]: 2026-03-10 13:27:30.126995995 +0000 UTC m=+0.177604865 container died 1940208f7c827ff6b0a162d1e52edca395f203c335c05ace087f17798cbbe23f (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-0-deactivate, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_REF=squid, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-10T13:27:30.335 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 13:27:30 vm00 podman[101720]: 2026-03-10 13:27:30.143572395 +0000 UTC m=+0.194181267 container remove 1940208f7c827ff6b0a162d1e52edca395f203c335c05ace087f17798cbbe23f (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-0-deactivate, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, CEPH_REF=squid) 2026-03-10T13:27:30.335 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 13:27:30 vm00 systemd[1]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@osd.0.service: Deactivated successfully. 2026-03-10T13:27:30.335 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 13:27:30 vm00 systemd[1]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@osd.0.service: Unit process 101731 (conmon) remains running after unit stopped. 
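Before the osd.0 container above was stopped, the mgr asked the mons whether the daemon could go down without losing PG availability (the "osd ok-to-stop" dispatches earlier in the log), so the OSD_DOWN health warning that appears a few seconds later is the expected, transient side effect of the redeploy rather than a failure. The same safety check can be run by hand before restarting an OSD:

    # exits non-zero, with a reason, if stopping this OSD would
    # leave any placement group without enough replicas
    ceph osd ok-to-stop 0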
2026-03-10T13:27:30.335 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 13:27:30 vm00 systemd[1]: Stopped Ceph osd.0 for 98a3dada-1c81-11f1-89c9-d57c120f78d5. 2026-03-10T13:27:30.335 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 13:27:30 vm00 systemd[1]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@osd.0.service: Consumed 9.910s CPU time, 167.7M memory peak. 2026-03-10T13:27:30.753 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 13:27:30 vm00 systemd[1]: Starting Ceph osd.0 for 98a3dada-1c81-11f1-89c9-d57c120f78d5... 2026-03-10T13:27:30.753 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 13:27:30 vm00 podman[101821]: 2026-03-10 13:27:30.42526997 +0000 UTC m=+0.015887732 container create b706ff375773676fe34ec294a70d44c372d9b7caca712a1200be5521756ea39c (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-0-activate, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, CEPH_REF=squid, org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, ceph=True) 2026-03-10T13:27:30.753 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 13:27:30 vm00 podman[101821]: 2026-03-10 13:27:30.462769459 +0000 UTC m=+0.053387221 container init b706ff375773676fe34ec294a70d44c372d9b7caca712a1200be5521756ea39c (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-0-activate, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team ) 2026-03-10T13:27:30.753 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 13:27:30 vm00 podman[101821]: 2026-03-10 13:27:30.465621959 +0000 UTC m=+0.056239711 container start b706ff375773676fe34ec294a70d44c372d9b7caca712a1200be5521756ea39c (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-0-activate, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/) 
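The osd-0-activate container that follows rebuilds osd.0's runtime directory under the new image before the long-lived daemon starts. The "Failed to activate via raw" line a little further down is the raw-device activation attempt coming up empty before activation falls back to the LVM path, which then primes /var/lib/ceph/osd/ceph-0 from the BlueStore device, re-links the block symlink, and fixes ownership. Roughly, that LVM path amounts to the following (device path abbreviated here; on a real node it is resolved from the LVM tags, as shown verbatim in the log lines below):

    ceph-bluestore-tool --cluster=ceph prime-osd-dir \
        --dev /dev/ceph-<vg>/osd-block-<uuid> \
        --path /var/lib/ceph/osd/ceph-0 --no-mon-config      # recreate the OSD metadata files
    ln -snf /dev/ceph-<vg>/osd-block-<uuid> /var/lib/ceph/osd/ceph-0/block
    chown -h ceph:ceph /var/lib/ceph/osd/ceph-0/block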
2026-03-10T13:27:30.753 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 13:27:30 vm00 podman[101821]: 2026-03-10 13:27:30.468085481 +0000 UTC m=+0.058703243 container attach b706ff375773676fe34ec294a70d44c372d9b7caca712a1200be5521756ea39c (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-0-activate, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.build-date=20260223, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-10T13:27:30.753 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 13:27:30 vm00 podman[101821]: 2026-03-10 13:27:30.419005643 +0000 UTC m=+0.009623416 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:27:30.753 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 13:27:30 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-0-activate[101831]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T13:27:30.753 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 13:27:30 vm00 bash[101821]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T13:27:30.753 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 13:27:30 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-0-activate[101831]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T13:27:30.753 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 13:27:30 vm00 bash[101821]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T13:27:31.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:30 vm08 ceph-mon[82639]: pgmap v19: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 73 KiB/s rd, 0 B/s wr, 120 op/s 2026-03-10T13:27:31.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:30 vm08 ceph-mon[82639]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-10T13:27:31.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:30 vm08 ceph-mon[82639]: osdmap e96: 8 total, 7 up, 8 in 2026-03-10T13:27:31.032 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:30 vm00 ceph-mon[94470]: pgmap v19: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 73 KiB/s rd, 0 B/s wr, 120 op/s 2026-03-10T13:27:31.032 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:30 vm00 ceph-mon[94470]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-10T13:27:31.032 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:30 vm00 ceph-mon[94470]: osdmap e96: 8 total, 7 up, 8 in 2026-03-10T13:27:31.032 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 13:27:30 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-0-activate[101831]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-10T13:27:31.032 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 13:27:30 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-0-activate[101831]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T13:27:31.032 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 13:27:30 vm00 
bash[101821]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-10T13:27:31.032 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 13:27:30 vm00 bash[101821]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T13:27:31.032 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 13:27:30 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-0-activate[101831]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T13:27:31.032 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 13:27:30 vm00 bash[101821]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T13:27:31.032 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 13:27:31 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-0-activate[101831]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0 2026-03-10T13:27:31.033 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:30 vm00 ceph-mon[96293]: pgmap v19: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 73 KiB/s rd, 0 B/s wr, 120 op/s 2026-03-10T13:27:31.033 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:30 vm00 ceph-mon[96293]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-10T13:27:31.033 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:30 vm00 ceph-mon[96293]: osdmap e96: 8 total, 7 up, 8 in 2026-03-10T13:27:31.313 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 13:27:31 vm00 bash[101821]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0 2026-03-10T13:27:31.313 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 13:27:31 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-0-activate[101831]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-23ba2dc8-5a29-44d6-9782-670b7d8d1c44/osd-block-33741dbc-5269-4c43-97b4-ac057d7a2041 --path /var/lib/ceph/osd/ceph-0 --no-mon-config 2026-03-10T13:27:31.313 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 13:27:31 vm00 bash[101821]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-23ba2dc8-5a29-44d6-9782-670b7d8d1c44/osd-block-33741dbc-5269-4c43-97b4-ac057d7a2041 --path /var/lib/ceph/osd/ceph-0 --no-mon-config 2026-03-10T13:27:31.314 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 13:27:31 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-0-activate[101831]: Running command: /usr/bin/ln -snf /dev/ceph-23ba2dc8-5a29-44d6-9782-670b7d8d1c44/osd-block-33741dbc-5269-4c43-97b4-ac057d7a2041 /var/lib/ceph/osd/ceph-0/block 2026-03-10T13:27:31.753 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 13:27:31 vm00 bash[101821]: Running command: /usr/bin/ln -snf /dev/ceph-23ba2dc8-5a29-44d6-9782-670b7d8d1c44/osd-block-33741dbc-5269-4c43-97b4-ac057d7a2041 /var/lib/ceph/osd/ceph-0/block 2026-03-10T13:27:31.753 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 13:27:31 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-0-activate[101831]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-0/block 2026-03-10T13:27:31.753 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 13:27:31 vm00 bash[101821]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-0/block 2026-03-10T13:27:31.753 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 13:27:31 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-0-activate[101831]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-0 2026-03-10T13:27:31.753 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 13:27:31 vm00 bash[101821]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-0 2026-03-10T13:27:31.753 
INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 13:27:31 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-0-activate[101831]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0 2026-03-10T13:27:31.753 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 13:27:31 vm00 bash[101821]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0 2026-03-10T13:27:31.753 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 13:27:31 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-0-activate[101831]: --> ceph-volume lvm activate successful for osd ID: 0 2026-03-10T13:27:31.753 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 13:27:31 vm00 bash[101821]: --> ceph-volume lvm activate successful for osd ID: 0 2026-03-10T13:27:31.753 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 13:27:31 vm00 podman[101821]: 2026-03-10 13:27:31.342803565 +0000 UTC m=+0.933421327 container died b706ff375773676fe34ec294a70d44c372d9b7caca712a1200be5521756ea39c (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-0-activate, org.label-schema.build-date=20260223, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, ceph=True) 2026-03-10T13:27:31.753 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 13:27:31 vm00 podman[101821]: 2026-03-10 13:27:31.364693213 +0000 UTC m=+0.955310964 container remove b706ff375773676fe34ec294a70d44c372d9b7caca712a1200be5521756ea39c (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-0-activate, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.label-schema.build-date=20260223, org.label-schema.vendor=CentOS, CEPH_REF=squid, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, io.buildah.version=1.41.3, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.schema-version=1.0) 2026-03-10T13:27:31.753 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 13:27:31 vm00 podman[102068]: 2026-03-10 13:27:31.450226109 +0000 UTC m=+0.016354747 container create 5fc74f4d21799f9910b5d2765dd5ccb080a8c8bf5820c225ffc639a3f3e0f0ad (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-0, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, 
org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, ceph=True, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default) 2026-03-10T13:27:31.753 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 13:27:31 vm00 podman[102068]: 2026-03-10 13:27:31.487498793 +0000 UTC m=+0.053627441 container init 5fc74f4d21799f9910b5d2765dd5ccb080a8c8bf5820c225ffc639a3f3e0f0ad (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-0, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, org.label-schema.build-date=20260223, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-10T13:27:31.753 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 13:27:31 vm00 podman[102068]: 2026-03-10 13:27:31.491165897 +0000 UTC m=+0.057294545 container start 5fc74f4d21799f9910b5d2765dd5ccb080a8c8bf5820c225ffc639a3f3e0f0ad (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.vendor=CentOS, org.label-schema.build-date=20260223, CEPH_REF=squid, ceph=True, io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2) 2026-03-10T13:27:31.753 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 13:27:31 vm00 bash[102068]: 5fc74f4d21799f9910b5d2765dd5ccb080a8c8bf5820c225ffc639a3f3e0f0ad 2026-03-10T13:27:31.753 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 13:27:31 vm00 podman[102068]: 2026-03-10 13:27:31.443728786 +0000 UTC m=+0.009857424 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:27:31.753 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 13:27:31 vm00 systemd[1]: Started Ceph osd.0 for 98a3dada-1c81-11f1-89c9-d57c120f78d5. 
2026-03-10T13:27:32.113 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:31 vm00 ceph-mon[96293]: osdmap e97: 8 total, 7 up, 8 in 2026-03-10T13:27:32.113 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:31 vm00 ceph-mon[96293]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:27:32.113 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:31 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:32.113 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:31 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:32.113 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:31 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:32.113 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:31 vm00 ceph-mon[94470]: osdmap e97: 8 total, 7 up, 8 in 2026-03-10T13:27:32.113 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:31 vm00 ceph-mon[94470]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:27:32.113 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:31 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:32.113 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:31 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:32.113 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:31 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:32.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:31 vm08 ceph-mon[82639]: osdmap e97: 8 total, 7 up, 8 in 2026-03-10T13:27:32.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:31 vm08 ceph-mon[82639]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:27:32.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:31 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:32.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:31 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:32.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:31 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:32.382 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 13:27:32 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-0[102078]: 2026-03-10T13:27:32.315+0000 7f12ddd26740 -1 Falling back to public interface 2026-03-10T13:27:33.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:32 vm00 ceph-mon[94470]: pgmap v22: 161 pgs: 21 stale+active+clean, 140 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:27:33.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:32 vm00 ceph-mon[96293]: pgmap v22: 161 pgs: 21 stale+active+clean, 140 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:27:33.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:32 vm08 ceph-mon[82639]: pgmap v22: 161 pgs: 21 stale+active+clean, 140 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:27:33.753 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 13:27:33 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-0[102078]: 2026-03-10T13:27:33.428+0000 7f12ddd26740 -1 osd.0 0 read_superblock omap replica is missing. 
2026-03-10T13:27:33.753 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 13:27:33 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-0[102078]: 2026-03-10T13:27:33.477+0000 7f12ddd26740 -1 osd.0 95 log_to_monitors true 2026-03-10T13:27:34.067 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:33 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:34.067 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:33 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:34.067 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:33 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:34.067 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:33 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:34.067 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:33 vm00 ceph-mon[94470]: from='osd.0 [v2:192.168.123.100:6802/403112501,v1:192.168.123.100:6803/403112501]' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch 2026-03-10T13:27:34.067 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:33 vm00 ceph-mon[94470]: from='osd.0 ' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch 2026-03-10T13:27:34.067 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:33 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:34.067 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:33 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:34.067 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:33 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:34.067 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:33 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:34.067 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:33 vm00 ceph-mon[96293]: from='osd.0 [v2:192.168.123.100:6802/403112501,v1:192.168.123.100:6803/403112501]' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch 2026-03-10T13:27:34.067 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:33 vm00 ceph-mon[96293]: from='osd.0 ' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch 2026-03-10T13:27:34.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:33 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:34.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:33 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:34.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:33 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:34.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:33 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:34.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:33 vm08 ceph-mon[82639]: from='osd.0 [v2:192.168.123.100:6802/403112501,v1:192.168.123.100:6803/403112501]' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch 2026-03-10T13:27:34.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:33 vm08 ceph-mon[82639]: from='osd.0 ' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch 2026-03-10T13:27:34.381 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 13:27:34 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-0[102078]: 2026-03-10T13:27:34.273+0000 7f12d5ad1640 -1 osd.0 95 set_numa_affinity unable 
to identify public interface '' numa node: (2) No such file or directory 2026-03-10T13:27:34.995 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:34 vm00 ceph-mon[94470]: pgmap v23: 161 pgs: 21 stale+active+clean, 140 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 639 B/s rd, 0 op/s 2026-03-10T13:27:34.995 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:34 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:34.995 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:34 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:34.995 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:34 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:27:34.995 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:34 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:27:34.995 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:34 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:34.995 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:34 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:27:34.995 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:34 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:34.995 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:34 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:34.995 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:34 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:34.995 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:34 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["1"], "max": 16}]: dispatch 2026-03-10T13:27:34.995 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:34 vm00 ceph-mon[94470]: from='osd.0 ' entity='osd.0' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]': finished 2026-03-10T13:27:34.995 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:34 vm00 ceph-mon[94470]: osdmap e98: 8 total, 7 up, 8 in 2026-03-10T13:27:34.995 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:34 vm00 ceph-mon[94470]: from='osd.0 [v2:192.168.123.100:6802/403112501,v1:192.168.123.100:6803/403112501]' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch 2026-03-10T13:27:34.995 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:34 vm00 ceph-mon[94470]: from='osd.0 ' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch 2026-03-10T13:27:34.995 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:34 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:34.995 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:34 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch 2026-03-10T13:27:34.995 
INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:34 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:27:34.996 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:34 vm00 ceph-mon[96293]: pgmap v23: 161 pgs: 21 stale+active+clean, 140 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 639 B/s rd, 0 op/s 2026-03-10T13:27:34.996 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:34 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:34.996 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:34 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:34.996 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:34 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:27:34.996 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:34 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:27:34.996 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:34 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:34.996 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:34 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:27:34.996 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:34 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:34.996 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:34 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:34.996 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:34 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:34.996 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:34 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["1"], "max": 16}]: dispatch 2026-03-10T13:27:34.996 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:34 vm00 ceph-mon[96293]: from='osd.0 ' entity='osd.0' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]': finished 2026-03-10T13:27:34.996 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:34 vm00 ceph-mon[96293]: osdmap e98: 8 total, 7 up, 8 in 2026-03-10T13:27:34.997 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:34 vm00 ceph-mon[96293]: from='osd.0 [v2:192.168.123.100:6802/403112501,v1:192.168.123.100:6803/403112501]' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch 2026-03-10T13:27:34.997 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:34 vm00 ceph-mon[96293]: from='osd.0 ' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch 2026-03-10T13:27:34.997 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:34 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:34.997 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:34 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' 
cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch 2026-03-10T13:27:34.997 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:34 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:27:35.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:34 vm08 ceph-mon[82639]: pgmap v23: 161 pgs: 21 stale+active+clean, 140 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 639 B/s rd, 0 op/s 2026-03-10T13:27:35.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:34 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:35.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:34 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:35.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:34 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:27:35.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:34 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:27:35.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:34 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:35.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:34 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:27:35.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:34 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:35.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:34 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:35.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:34 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:35.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:34 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["1"], "max": 16}]: dispatch 2026-03-10T13:27:35.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:34 vm08 ceph-mon[82639]: from='osd.0 ' entity='osd.0' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]': finished 2026-03-10T13:27:35.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:34 vm08 ceph-mon[82639]: osdmap e98: 8 total, 7 up, 8 in 2026-03-10T13:27:35.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:34 vm08 ceph-mon[82639]: from='osd.0 [v2:192.168.123.100:6802/403112501,v1:192.168.123.100:6803/403112501]' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch 2026-03-10T13:27:35.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:34 vm08 ceph-mon[82639]: from='osd.0 ' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch 2026-03-10T13:27:35.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:34 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:35.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:34 
vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch 2026-03-10T13:27:35.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:34 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:27:35.690 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 13:27:35 vm00 systemd[1]: Stopping Ceph osd.1 for 98a3dada-1c81-11f1-89c9-d57c120f78d5... 2026-03-10T13:27:36.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:35 vm00 ceph-mon[94470]: from='mon.2 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["1"], "max": 16}]: dispatch 2026-03-10T13:27:36.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:35 vm00 ceph-mon[94470]: Upgrade: osd.1 is safe to restart 2026-03-10T13:27:36.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:35 vm00 ceph-mon[94470]: Upgrade: Updating osd.1 2026-03-10T13:27:36.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:35 vm00 ceph-mon[94470]: Deploying daemon osd.1 on vm00 2026-03-10T13:27:36.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:35 vm00 ceph-mon[94470]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-10T13:27:36.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:35 vm00 ceph-mon[94470]: Cluster is now healthy 2026-03-10T13:27:36.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:35 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T13:27:36.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:35 vm00 ceph-mon[94470]: osd.0 [v2:192.168.123.100:6802/403112501,v1:192.168.123.100:6803/403112501] boot 2026-03-10T13:27:36.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:35 vm00 ceph-mon[94470]: osdmap e99: 8 total, 8 up, 8 in 2026-03-10T13:27:36.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:35 vm00 ceph-mon[94470]: osd.1 marked itself down and dead 2026-03-10T13:27:36.003 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:27:35 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:27:35] "GET /metrics HTTP/1.1" 200 37589 "" "Prometheus/2.51.0" 2026-03-10T13:27:36.003 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 13:27:35 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-1[57427]: 2026-03-10T13:27:35.756+0000 7f54620ef700 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-10T13:27:36.003 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 13:27:35 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-1[57427]: 2026-03-10T13:27:35.756+0000 7f54620ef700 -1 osd.1 99 *** Got signal Terminated *** 2026-03-10T13:27:36.003 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 13:27:35 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-1[57427]: 2026-03-10T13:27:35.756+0000 7f54620ef700 -1 osd.1 99 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-10T13:27:36.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:35 vm00 ceph-mon[96293]: from='mon.2 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["1"], "max": 16}]: dispatch 2026-03-10T13:27:36.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:35 vm00 ceph-mon[96293]: Upgrade: osd.1 is safe to restart 2026-03-10T13:27:36.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:35 vm00 ceph-mon[96293]: Upgrade: Updating osd.1 2026-03-10T13:27:36.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:35 vm00 ceph-mon[96293]: Deploying daemon osd.1 on vm00 2026-03-10T13:27:36.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:35 vm00 ceph-mon[96293]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-10T13:27:36.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:35 vm00 ceph-mon[96293]: Cluster is now healthy 2026-03-10T13:27:36.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:35 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T13:27:36.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:35 vm00 ceph-mon[96293]: osd.0 [v2:192.168.123.100:6802/403112501,v1:192.168.123.100:6803/403112501] boot 2026-03-10T13:27:36.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:35 vm00 ceph-mon[96293]: osdmap e99: 8 total, 8 up, 8 in 2026-03-10T13:27:36.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:35 vm00 ceph-mon[96293]: osd.1 marked itself down and dead 2026-03-10T13:27:36.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:35 vm08 ceph-mon[82639]: from='mon.2 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["1"], "max": 16}]: dispatch 2026-03-10T13:27:36.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:35 vm08 ceph-mon[82639]: Upgrade: osd.1 is safe to restart 2026-03-10T13:27:36.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:35 vm08 ceph-mon[82639]: Upgrade: Updating osd.1 2026-03-10T13:27:36.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:35 vm08 ceph-mon[82639]: Deploying daemon osd.1 on vm00 2026-03-10T13:27:36.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:35 vm08 ceph-mon[82639]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-10T13:27:36.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:35 vm08 ceph-mon[82639]: Cluster is now healthy 2026-03-10T13:27:36.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:35 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T13:27:36.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:35 vm08 ceph-mon[82639]: osd.0 [v2:192.168.123.100:6802/403112501,v1:192.168.123.100:6803/403112501] boot 2026-03-10T13:27:36.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:35 vm08 ceph-mon[82639]: osdmap e99: 8 total, 8 up, 8 in 2026-03-10T13:27:36.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:35 vm08 ceph-mon[82639]: osd.1 marked itself down and dead 2026-03-10T13:27:36.658 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 13:27:36 vm00 podman[105760]: 2026-03-10 13:27:36.287183458 +0000 UTC m=+0.576040103 container died 647927dc41ea28257331e06786554f97771c0f4697fb31eeb0b65f8f089dd567 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-1, com.redhat.component=centos-stream-container, version=8, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. 
This image takes the Red Hat UBI and layers on content from CentOS Stream, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., ceph=True, build-date=2022-05-03T08:36:31.336870, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, distribution-scope=public, CEPH_POINT_RELEASE=-17.2.0, io.k8s.display-name=CentOS Stream 8, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, io.openshift.tags=base centos centos-stream, vcs-type=git, vendor=Red Hat, Inc., GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, maintainer=Guillaume Abrioux , io.openshift.expose-services=, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, GIT_BRANCH=HEAD, GIT_REPO=https://github.com/ceph/ceph-container.git, RELEASE=HEAD, GIT_CLEAN=True, release=754, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, architecture=x86_64, io.buildah.version=1.19.8, name=centos-stream) 2026-03-10T13:27:36.658 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 13:27:36 vm00 podman[105760]: 2026-03-10 13:27:36.339396811 +0000 UTC m=+0.628253456 container remove 647927dc41ea28257331e06786554f97771c0f4697fb31eeb0b65f8f089dd567 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-1, version=8, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, maintainer=Guillaume Abrioux , summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, vendor=Red Hat, Inc., GIT_REPO=https://github.com/ceph/ceph-container.git, CEPH_POINT_RELEASE=-17.2.0, ceph=True, GIT_CLEAN=True, architecture=x86_64, io.openshift.tags=base centos centos-stream, name=centos-stream, vcs-type=git, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, io.buildah.version=1.19.8, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, RELEASE=HEAD, com.redhat.component=centos-stream-container, GIT_BRANCH=HEAD, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, distribution-scope=public, build-date=2022-05-03T08:36:31.336870, io.openshift.expose-services=, io.k8s.display-name=CentOS Stream 8, release=754) 2026-03-10T13:27:36.658 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 13:27:36 vm00 bash[105760]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-1 2026-03-10T13:27:36.962 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:36 vm00 ceph-mon[94470]: pgmap v26: 161 pgs: 37 active+undersized, 21 active+undersized+degraded, 103 active+clean; 457 KiB data, 123 MiB used, 160 GiB / 160 GiB avail; 68/627 objects degraded (10.845%) 2026-03-10T13:27:36.962 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:36 vm00 ceph-mon[94470]: Health check failed: Degraded data redundancy: 68/627 objects degraded (10.845%), 21 pgs degraded (PG_DEGRADED) 2026-03-10T13:27:36.962 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:36 vm00 ceph-mon[94470]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-10T13:27:36.962 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:36 vm00 ceph-mon[94470]: osdmap e100: 8 total, 7 up, 8 in 2026-03-10T13:27:36.962 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 13:27:36 vm00 podman[105826]: 2026-03-10 13:27:36.65728224 +0000 UTC m=+0.045589545 container create 4148fdab77c46b2f82447f074177254f3c31243c47ce0163baad6c7920a329ec (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-1-deactivate, CEPH_REF=squid, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, org.label-schema.build-date=20260223) 2026-03-10T13:27:36.962 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 13:27:36 vm00 podman[105826]: 2026-03-10 13:27:36.705835121 +0000 UTC m=+0.094142435 container init 4148fdab77c46b2f82447f074177254f3c31243c47ce0163baad6c7920a329ec (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-1-deactivate, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, org.label-schema.build-date=20260223, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS) 2026-03-10T13:27:36.962 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 13:27:36 vm00 podman[105826]: 2026-03-10 13:27:36.70896008 +0000 UTC m=+0.097267385 container start 4148fdab77c46b2f82447f074177254f3c31243c47ce0163baad6c7920a329ec (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, 
name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-1-deactivate, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20260223, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, CEPH_REF=squid, OSD_FLAVOR=default) 2026-03-10T13:27:36.962 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 13:27:36 vm00 podman[105826]: 2026-03-10 13:27:36.713081045 +0000 UTC m=+0.101388350 container attach 4148fdab77c46b2f82447f074177254f3c31243c47ce0163baad6c7920a329ec (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-1-deactivate, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, ceph=True, CEPH_REF=squid) 2026-03-10T13:27:36.962 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 13:27:36 vm00 podman[105826]: 2026-03-10 13:27:36.644854906 +0000 UTC m=+0.033162221 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:27:36.962 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 13:27:36 vm00 conmon[105839]: conmon 4148fdab77c46b2f8244 : Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-4148fdab77c46b2f82447f074177254f3c31243c47ce0163baad6c7920a329ec.scope/container/memory.events 2026-03-10T13:27:36.962 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 13:27:36 vm00 podman[105826]: 2026-03-10 13:27:36.828675978 +0000 UTC m=+0.216983272 container died 4148fdab77c46b2f82447f074177254f3c31243c47ce0163baad6c7920a329ec (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-1-deactivate, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, CEPH_REF=squid, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS) 2026-03-10T13:27:36.962 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 13:27:36 vm00 podman[105826]: 2026-03-10 13:27:36.869312318 +0000 UTC m=+0.257619623 container remove 4148fdab77c46b2f82447f074177254f3c31243c47ce0163baad6c7920a329ec 
(image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-1-deactivate, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, CEPH_REF=squid, ceph=True) 2026-03-10T13:27:36.962 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 13:27:36 vm00 systemd[1]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@osd.1.service: Deactivated successfully. 2026-03-10T13:27:36.962 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 13:27:36 vm00 systemd[1]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@osd.1.service: Unit process 105839 (conmon) remains running after unit stopped. 2026-03-10T13:27:36.962 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 13:27:36 vm00 systemd[1]: Stopped Ceph osd.1 for 98a3dada-1c81-11f1-89c9-d57c120f78d5. 2026-03-10T13:27:36.963 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 13:27:36 vm00 systemd[1]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@osd.1.service: Consumed 16.171s CPU time, 182.6M memory peak. 2026-03-10T13:27:36.963 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:36 vm00 ceph-mon[96293]: pgmap v26: 161 pgs: 37 active+undersized, 21 active+undersized+degraded, 103 active+clean; 457 KiB data, 123 MiB used, 160 GiB / 160 GiB avail; 68/627 objects degraded (10.845%) 2026-03-10T13:27:36.963 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:36 vm00 ceph-mon[96293]: Health check failed: Degraded data redundancy: 68/627 objects degraded (10.845%), 21 pgs degraded (PG_DEGRADED) 2026-03-10T13:27:36.963 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:36 vm00 ceph-mon[96293]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-10T13:27:36.963 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:36 vm00 ceph-mon[96293]: osdmap e100: 8 total, 7 up, 8 in 2026-03-10T13:27:37.253 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 13:27:37 vm00 systemd[1]: Starting Ceph osd.1 for 98a3dada-1c81-11f1-89c9-d57c120f78d5... 
2026-03-10T13:27:37.253 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 13:27:37 vm00 podman[105927]: 2026-03-10 13:27:37.20100857 +0000 UTC m=+0.017379775 container create ac585739fb8ad2b34ca6014f9573744821b78a09584c8476cb3bc9ffdcb0e26a (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-1-activate, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-10T13:27:37.253 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:27:36 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:27:36.992Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:27:37.253 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:27:36 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:27:36.993Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:27:37.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:36 vm08 ceph-mon[82639]: pgmap v26: 161 pgs: 37 active+undersized, 21 active+undersized+degraded, 103 active+clean; 457 KiB data, 123 MiB used, 160 GiB / 160 GiB avail; 68/627 objects degraded (10.845%) 2026-03-10T13:27:37.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:36 vm08 ceph-mon[82639]: Health check failed: Degraded data redundancy: 68/627 objects degraded (10.845%), 21 pgs degraded (PG_DEGRADED) 2026-03-10T13:27:37.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:36 vm08 ceph-mon[82639]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-10T13:27:37.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:36 vm08 ceph-mon[82639]: osdmap e100: 8 total, 7 up, 8 in 2026-03-10T13:27:37.753 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 13:27:37 vm00 podman[105927]: 2026-03-10 13:27:37.290972402 +0000 UTC m=+0.107343607 container init ac585739fb8ad2b34ca6014f9573744821b78a09584c8476cb3bc9ffdcb0e26a (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-1-activate, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, 
FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/) 2026-03-10T13:27:37.754 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 13:27:37 vm00 podman[105927]: 2026-03-10 13:27:37.192732657 +0000 UTC m=+0.009103873 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:27:37.754 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 13:27:37 vm00 podman[105927]: 2026-03-10 13:27:37.295350689 +0000 UTC m=+0.111721883 container start ac585739fb8ad2b34ca6014f9573744821b78a09584c8476cb3bc9ffdcb0e26a (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-1-activate, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.license=GPLv2, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS) 2026-03-10T13:27:37.754 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 13:27:37 vm00 podman[105927]: 2026-03-10 13:27:37.29789985 +0000 UTC m=+0.114271055 container attach ac585739fb8ad2b34ca6014f9573744821b78a09584c8476cb3bc9ffdcb0e26a (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-1-activate, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20260223, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, CEPH_REF=squid, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3) 2026-03-10T13:27:37.754 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 13:27:37 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-1-activate[105940]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T13:27:37.754 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 13:27:37 vm00 bash[105927]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T13:27:37.754 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 13:27:37 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-1-activate[105940]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T13:27:37.754 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 13:27:37 vm00 bash[105927]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T13:27:38.376 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:38 vm00 ceph-mon[94470]: osdmap e101: 8 total, 7 up, 8 in 2026-03-10T13:27:38.376 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:38 vm00 ceph-mon[94470]: pgmap v29: 161 pgs: 7 
stale+active+undersized, 15 stale+active+clean, 3 stale+active+undersized+degraded, 30 active+undersized, 18 active+undersized+degraded, 88 active+clean; 457 KiB data, 123 MiB used, 160 GiB / 160 GiB avail; 68/627 objects degraded (10.845%) 2026-03-10T13:27:38.376 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 13:27:38 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-1-activate[105940]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-10T13:27:38.376 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 13:27:38 vm00 bash[105927]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-10T13:27:38.376 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 13:27:38 vm00 bash[105927]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T13:27:38.376 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 13:27:38 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-1-activate[105940]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T13:27:38.376 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 13:27:38 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-1-activate[105940]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T13:27:38.376 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 13:27:38 vm00 bash[105927]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T13:27:38.376 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 13:27:38 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-1-activate[105940]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-1 2026-03-10T13:27:38.376 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 13:27:38 vm00 bash[105927]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-1 2026-03-10T13:27:38.376 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 13:27:38 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-1-activate[105940]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-9712fe18-cfbd-4537-8e2d-ca31bf24dba2/osd-block-31dc7b09-f48f-4ec2-8ad6-69f3b68a5138 --path /var/lib/ceph/osd/ceph-1 --no-mon-config 2026-03-10T13:27:38.376 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 13:27:38 vm00 bash[105927]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-9712fe18-cfbd-4537-8e2d-ca31bf24dba2/osd-block-31dc7b09-f48f-4ec2-8ad6-69f3b68a5138 --path /var/lib/ceph/osd/ceph-1 --no-mon-config 2026-03-10T13:27:38.376 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:38 vm00 ceph-mon[96293]: osdmap e101: 8 total, 7 up, 8 in 2026-03-10T13:27:38.377 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:38 vm00 ceph-mon[96293]: pgmap v29: 161 pgs: 7 stale+active+undersized, 15 stale+active+clean, 3 stale+active+undersized+degraded, 30 active+undersized, 18 active+undersized+degraded, 88 active+clean; 457 KiB data, 123 MiB used, 160 GiB / 160 GiB avail; 68/627 objects degraded (10.845%) 2026-03-10T13:27:38.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:38 vm08 ceph-mon[82639]: osdmap e101: 8 total, 7 up, 8 in 2026-03-10T13:27:38.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:38 vm08 ceph-mon[82639]: pgmap v29: 161 pgs: 7 stale+active+undersized, 15 stale+active+clean, 3 stale+active+undersized+degraded, 30 active+undersized, 18 active+undersized+degraded, 88 active+clean; 457 KiB data, 123 MiB used, 160 GiB / 160 GiB avail; 68/627 objects degraded (10.845%) 2026-03-10T13:27:38.753 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 13:27:38 vm00 
ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-1-activate[105940]: Running command: /usr/bin/ln -snf /dev/ceph-9712fe18-cfbd-4537-8e2d-ca31bf24dba2/osd-block-31dc7b09-f48f-4ec2-8ad6-69f3b68a5138 /var/lib/ceph/osd/ceph-1/block 2026-03-10T13:27:38.753 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 13:27:38 vm00 bash[105927]: Running command: /usr/bin/ln -snf /dev/ceph-9712fe18-cfbd-4537-8e2d-ca31bf24dba2/osd-block-31dc7b09-f48f-4ec2-8ad6-69f3b68a5138 /var/lib/ceph/osd/ceph-1/block 2026-03-10T13:27:38.753 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 13:27:38 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-1-activate[105940]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-1/block 2026-03-10T13:27:38.753 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 13:27:38 vm00 bash[105927]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-1/block 2026-03-10T13:27:38.753 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 13:27:38 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-1-activate[105940]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-1 2026-03-10T13:27:38.753 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 13:27:38 vm00 bash[105927]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-1 2026-03-10T13:27:38.753 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 13:27:38 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-1-activate[105940]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-1 2026-03-10T13:27:38.753 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 13:27:38 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-1-activate[105940]: --> ceph-volume lvm activate successful for osd ID: 1 2026-03-10T13:27:38.753 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 13:27:38 vm00 bash[105927]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-1 2026-03-10T13:27:38.753 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 13:27:38 vm00 bash[105927]: --> ceph-volume lvm activate successful for osd ID: 1 2026-03-10T13:27:38.753 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 13:27:38 vm00 podman[105927]: 2026-03-10 13:27:38.402881906 +0000 UTC m=+1.219253111 container died ac585739fb8ad2b34ca6014f9573744821b78a09584c8476cb3bc9ffdcb0e26a (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-1-activate, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, ceph=True, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df) 2026-03-10T13:27:38.753 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 13:27:38 vm00 podman[105927]: 2026-03-10 13:27:38.422899078 +0000 UTC m=+1.239270283 container remove ac585739fb8ad2b34ca6014f9573744821b78a09584c8476cb3bc9ffdcb0e26a (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-1-activate, CEPH_REF=squid, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, 
GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0) 2026-03-10T13:27:38.753 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 13:27:38 vm00 podman[106174]: 2026-03-10 13:27:38.560332021 +0000 UTC m=+0.035585066 container create dc65e199e9ebe6db35777e2d4a16bd3caa1e65053b05529aa4472e815e9dd88a (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-1, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-10T13:27:38.753 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 13:27:38 vm00 podman[106174]: 2026-03-10 13:27:38.593816544 +0000 UTC m=+0.069069589 container init dc65e199e9ebe6db35777e2d4a16bd3caa1e65053b05529aa4472e815e9dd88a (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-1, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, io.buildah.version=1.41.3, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.vendor=CentOS) 2026-03-10T13:27:38.753 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 13:27:38 vm00 podman[106174]: 2026-03-10 13:27:38.598719623 +0000 UTC m=+0.073972658 container start dc65e199e9ebe6db35777e2d4a16bd3caa1e65053b05529aa4472e815e9dd88a (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-1, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20260223, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid) 2026-03-10T13:27:38.753 
INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 13:27:38 vm00 bash[106174]: dc65e199e9ebe6db35777e2d4a16bd3caa1e65053b05529aa4472e815e9dd88a 2026-03-10T13:27:38.753 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 13:27:38 vm00 podman[106174]: 2026-03-10 13:27:38.553284228 +0000 UTC m=+0.028537273 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:27:38.753 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 13:27:38 vm00 systemd[1]: Started Ceph osd.1 for 98a3dada-1c81-11f1-89c9-d57c120f78d5. 2026-03-10T13:27:39.253 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 13:27:38 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-1[106185]: 2026-03-10T13:27:38.940+0000 7fe945cf6740 -1 Falling back to public interface 2026-03-10T13:27:39.564 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 13:27:39 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-1[106185]: 2026-03-10T13:27:39.561+0000 7fe945cf6740 -1 osd.1 0 read_superblock omap replica is missing. 2026-03-10T13:27:39.849 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:39 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:39.849 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:39 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:39.849 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:39 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:39.849 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:39 vm00 ceph-mon[94470]: from='osd.1 [v2:192.168.123.100:6810/1672819278,v1:192.168.123.100:6811/1672819278]' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch 2026-03-10T13:27:39.849 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:39 vm00 ceph-mon[94470]: from='osd.1 ' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch 2026-03-10T13:27:39.849 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:39 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:39.849 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:39 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:39.849 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:39 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:39.849 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:39 vm00 ceph-mon[96293]: from='osd.1 [v2:192.168.123.100:6810/1672819278,v1:192.168.123.100:6811/1672819278]' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch 2026-03-10T13:27:39.849 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:39 vm00 ceph-mon[96293]: from='osd.1 ' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch 2026-03-10T13:27:39.849 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 13:27:39 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-1[106185]: 2026-03-10T13:27:39.602+0000 7fe945cf6740 -1 osd.1 99 log_to_monitors true 2026-03-10T13:27:40.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:39 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:40.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:39 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:40.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:39 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:40.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 
13:27:39 vm08 ceph-mon[82639]: from='osd.1 [v2:192.168.123.100:6810/1672819278,v1:192.168.123.100:6811/1672819278]' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch 2026-03-10T13:27:40.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:39 vm08 ceph-mon[82639]: from='osd.1 ' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch 2026-03-10T13:27:40.918 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:40 vm00 ceph-mon[94470]: pgmap v30: 161 pgs: 41 active+undersized, 22 active+undersized+degraded, 98 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail; 85/627 objects degraded (13.557%) 2026-03-10T13:27:40.918 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:40 vm00 ceph-mon[94470]: from='osd.1 ' entity='osd.1' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]': finished 2026-03-10T13:27:40.918 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:40 vm00 ceph-mon[94470]: osdmap e102: 8 total, 7 up, 8 in 2026-03-10T13:27:40.918 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:40 vm00 ceph-mon[94470]: from='osd.1 [v2:192.168.123.100:6810/1672819278,v1:192.168.123.100:6811/1672819278]' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch 2026-03-10T13:27:40.918 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:40 vm00 ceph-mon[94470]: from='osd.1 ' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch 2026-03-10T13:27:40.918 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:40 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:40.918 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:40 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:40.918 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:40 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:40.919 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:40 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:40.919 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:40 vm00 ceph-mon[96293]: pgmap v30: 161 pgs: 41 active+undersized, 22 active+undersized+degraded, 98 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail; 85/627 objects degraded (13.557%) 2026-03-10T13:27:40.919 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:40 vm00 ceph-mon[96293]: from='osd.1 ' entity='osd.1' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]': finished 2026-03-10T13:27:40.919 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:40 vm00 ceph-mon[96293]: osdmap e102: 8 total, 7 up, 8 in 2026-03-10T13:27:40.919 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:40 vm00 ceph-mon[96293]: from='osd.1 [v2:192.168.123.100:6810/1672819278,v1:192.168.123.100:6811/1672819278]' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch 2026-03-10T13:27:40.919 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:40 vm00 ceph-mon[96293]: from='osd.1 ' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch 2026-03-10T13:27:40.919 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:40 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:40.919 
INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:40 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:40.919 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:40 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:40.919 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:40 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:40.919 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 13:27:40 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-1[106185]: 2026-03-10T13:27:40.715+0000 7fe93d2a0640 -1 osd.1 99 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-10T13:27:41.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:40 vm08 ceph-mon[82639]: pgmap v30: 161 pgs: 41 active+undersized, 22 active+undersized+degraded, 98 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail; 85/627 objects degraded (13.557%) 2026-03-10T13:27:41.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:40 vm08 ceph-mon[82639]: from='osd.1 ' entity='osd.1' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]': finished 2026-03-10T13:27:41.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:40 vm08 ceph-mon[82639]: osdmap e102: 8 total, 7 up, 8 in 2026-03-10T13:27:41.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:40 vm08 ceph-mon[82639]: from='osd.1 [v2:192.168.123.100:6810/1672819278,v1:192.168.123.100:6811/1672819278]' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch 2026-03-10T13:27:41.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:40 vm08 ceph-mon[82639]: from='osd.1 ' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch 2026-03-10T13:27:41.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:40 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:41.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:40 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:41.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:40 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:41.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:40 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:42.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:41 vm00 ceph-mon[94470]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:27:42.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:41 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:42.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:41 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:42.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:41 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:27:42.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:41 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:27:42.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:41 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:42.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 
13:27:41 vm00 ceph-mon[96293]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:27:42.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:41 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:42.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:41 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:42.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:41 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:27:42.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:41 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:27:42.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:41 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:42.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:41 vm08 ceph-mon[82639]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:27:42.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:41 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:42.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:41 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:42.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:41 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:27:42.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:41 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:27:42.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:41 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:43.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:42 vm08 ceph-mon[82639]: OSD bench result of 24745.331156 IOPS is not within the threshold limit range of 50.000000 IOPS and 500.000000 IOPS for osd.1. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd]. 
2026-03-10T13:27:43.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:42 vm08 ceph-mon[82639]: pgmap v32: 161 pgs: 41 active+undersized, 22 active+undersized+degraded, 98 active+clean; 457 KiB data, 143 MiB used, 160 GiB / 160 GiB avail; 85/627 objects degraded (13.557%) 2026-03-10T13:27:43.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:42 vm08 ceph-mon[82639]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-10T13:27:43.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:42 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T13:27:43.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:42 vm08 ceph-mon[82639]: osd.1 [v2:192.168.123.100:6810/1672819278,v1:192.168.123.100:6811/1672819278] boot 2026-03-10T13:27:43.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:42 vm08 ceph-mon[82639]: osdmap e103: 8 total, 8 up, 8 in 2026-03-10T13:27:43.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:42 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:27:43.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:42 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:43.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:42 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:43.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:42 vm08 ceph-mon[82639]: Upgrade: Setting container_image for all nvmeof 2026-03-10T13:27:43.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:42 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:43.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:42 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:43.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:42 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:43.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:42 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:43.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:42 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:43.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:42 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:43.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:42 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:43.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:42 vm08 ceph-mon[82639]: Upgrade: Finalizing container_image settings 2026-03-10T13:27:43.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:42 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]: dispatch 2026-03-10T13:27:43.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:42 vm08 
ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]: dispatch 2026-03-10T13:27:43.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:42 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]': finished 2026-03-10T13:27:43.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:42 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:43.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:42 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:43.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:42 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon"}]': finished 2026-03-10T13:27:43.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:42 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]: dispatch 2026-03-10T13:27:43.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:42 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]: dispatch 2026-03-10T13:27:43.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:42 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]': finished 2026-03-10T13:27:43.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:42 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd"}]: dispatch 2026-03-10T13:27:43.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:42 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd"}]: dispatch 2026-03-10T13:27:43.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:42 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mds"}]: dispatch 2026-03-10T13:27:43.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:42 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mds"}]: dispatch 2026-03-10T13:27:43.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:42 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]: dispatch 2026-03-10T13:27:43.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:42 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]: dispatch 2026-03-10T13:27:43.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:42 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]: dispatch 2026-03-10T13:27:43.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:42 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": 
"container_image", "who": "client.rbd-mirror"}]: dispatch 2026-03-10T13:27:43.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:42 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:43.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:42 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:43.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:42 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]: dispatch 2026-03-10T13:27:43.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:42 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]: dispatch 2026-03-10T13:27:43.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:42 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.iscsi"}]: dispatch 2026-03-10T13:27:43.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:42 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.iscsi"}]: dispatch 2026-03-10T13:27:43.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:42 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]: dispatch 2026-03-10T13:27:43.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:42 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]: dispatch 2026-03-10T13:27:43.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:42 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]: dispatch 2026-03-10T13:27:43.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:42 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]: dispatch 2026-03-10T13:27:43.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:42 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]': finished 2026-03-10T13:27:43.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:42 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:43.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:42 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:43.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:42 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:43.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:42 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", 
"who": "mon"}]: dispatch 2026-03-10T13:27:43.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:42 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:43.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:42 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:43.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:42 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:43.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:42 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:43.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:42 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:43.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:42 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:43.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:42 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:43.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:42 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:43.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:42 vm08 ceph-mon[82639]: Upgrade: Complete! 
2026-03-10T13:27:43.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:42 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]: dispatch 2026-03-10T13:27:43.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:42 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]: dispatch 2026-03-10T13:27:43.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:42 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]': finished 2026-03-10T13:27:43.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:42 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:27:43.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:42 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:27:43.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:42 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:43.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:42 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:27:43.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:42 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:27:43.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:42 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:43.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:42 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:43.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:42 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:27:43.022 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:42 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:43.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[94470]: OSD bench result of 24745.331156 IOPS is not within the threshold limit range of 50.000000 IOPS and 500.000000 IOPS for osd.1. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd]. 
2026-03-10T13:27:43.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[94470]: pgmap v32: 161 pgs: 41 active+undersized, 22 active+undersized+degraded, 98 active+clean; 457 KiB data, 143 MiB used, 160 GiB / 160 GiB avail; 85/627 objects degraded (13.557%) 2026-03-10T13:27:43.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[94470]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-10T13:27:43.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T13:27:43.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[94470]: osd.1 [v2:192.168.123.100:6810/1672819278,v1:192.168.123.100:6811/1672819278] boot 2026-03-10T13:27:43.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[94470]: osdmap e103: 8 total, 8 up, 8 in 2026-03-10T13:27:43.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:27:43.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:43.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:43.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[94470]: Upgrade: Setting container_image for all nvmeof 2026-03-10T13:27:43.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:43.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:43.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:43.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:43.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:43.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:43.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:43.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[94470]: Upgrade: Finalizing container_image settings 2026-03-10T13:27:43.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]: dispatch 2026-03-10T13:27:43.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:42 vm00 
ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]: dispatch 2026-03-10T13:27:43.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]': finished 2026-03-10T13:27:43.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:43.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:43.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon"}]': finished 2026-03-10T13:27:43.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]: dispatch 2026-03-10T13:27:43.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]: dispatch 2026-03-10T13:27:43.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]': finished 2026-03-10T13:27:43.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd"}]: dispatch 2026-03-10T13:27:43.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd"}]: dispatch 2026-03-10T13:27:43.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mds"}]: dispatch 2026-03-10T13:27:43.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mds"}]: dispatch 2026-03-10T13:27:43.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]: dispatch 2026-03-10T13:27:43.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]: dispatch 2026-03-10T13:27:43.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]: dispatch 2026-03-10T13:27:43.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": 
"container_image", "who": "client.rbd-mirror"}]: dispatch 2026-03-10T13:27:43.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:43.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:43.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]: dispatch 2026-03-10T13:27:43.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]: dispatch 2026-03-10T13:27:43.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.iscsi"}]: dispatch 2026-03-10T13:27:43.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.iscsi"}]: dispatch 2026-03-10T13:27:43.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]: dispatch 2026-03-10T13:27:43.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]: dispatch 2026-03-10T13:27:43.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]: dispatch 2026-03-10T13:27:43.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]: dispatch 2026-03-10T13:27:43.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]': finished 2026-03-10T13:27:43.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:43.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:43.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:43.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", 
"who": "mon"}]: dispatch 2026-03-10T13:27:43.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:43.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:43.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:43.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:43.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:43.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:43.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:43.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:43.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[94470]: Upgrade: Complete! 
2026-03-10T13:27:43.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]: dispatch 2026-03-10T13:27:43.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]: dispatch 2026-03-10T13:27:43.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]': finished 2026-03-10T13:27:43.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:27:43.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:27:43.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:43.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:27:43.255 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:27:43.255 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:43.255 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:43.255 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:27:43.255 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:43.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[96293]: OSD bench result of 24745.331156 IOPS is not within the threshold limit range of 50.000000 IOPS and 500.000000 IOPS for osd.1. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd]. 
2026-03-10T13:27:43.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[96293]: pgmap v32: 161 pgs: 41 active+undersized, 22 active+undersized+degraded, 98 active+clean; 457 KiB data, 143 MiB used, 160 GiB / 160 GiB avail; 85/627 objects degraded (13.557%) 2026-03-10T13:27:43.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[96293]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-10T13:27:43.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T13:27:43.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[96293]: osd.1 [v2:192.168.123.100:6810/1672819278,v1:192.168.123.100:6811/1672819278] boot 2026-03-10T13:27:43.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[96293]: osdmap e103: 8 total, 8 up, 8 in 2026-03-10T13:27:43.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:27:43.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:43.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:43.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[96293]: Upgrade: Setting container_image for all nvmeof 2026-03-10T13:27:43.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:43.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:43.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:43.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:43.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:43.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:43.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:27:43.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[96293]: Upgrade: Finalizing container_image settings 2026-03-10T13:27:43.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]: dispatch 2026-03-10T13:27:43.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:42 vm00 
ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]: dispatch 2026-03-10T13:27:43.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]': finished 2026-03-10T13:27:43.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:43.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:43.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon"}]': finished 2026-03-10T13:27:43.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]: dispatch 2026-03-10T13:27:43.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]: dispatch 2026-03-10T13:27:43.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]': finished 2026-03-10T13:27:43.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd"}]: dispatch 2026-03-10T13:27:43.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd"}]: dispatch 2026-03-10T13:27:43.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mds"}]: dispatch 2026-03-10T13:27:43.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mds"}]: dispatch 2026-03-10T13:27:43.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]: dispatch 2026-03-10T13:27:43.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]: dispatch 2026-03-10T13:27:43.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]: dispatch 2026-03-10T13:27:43.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": 
"container_image", "who": "client.rbd-mirror"}]: dispatch 2026-03-10T13:27:43.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:43.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:43.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]: dispatch 2026-03-10T13:27:43.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]: dispatch 2026-03-10T13:27:43.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.iscsi"}]: dispatch 2026-03-10T13:27:43.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.iscsi"}]: dispatch 2026-03-10T13:27:43.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]: dispatch 2026-03-10T13:27:43.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]: dispatch 2026-03-10T13:27:43.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]: dispatch 2026-03-10T13:27:43.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]: dispatch 2026-03-10T13:27:43.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]': finished 2026-03-10T13:27:43.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:43.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:43.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:43.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", 
"who": "mon"}]: dispatch 2026-03-10T13:27:43.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:43.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:43.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:43.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:43.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:43.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:43.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:43.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:27:43.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[96293]: Upgrade: Complete! 
2026-03-10T13:27:43.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]: dispatch 2026-03-10T13:27:43.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]: dispatch 2026-03-10T13:27:43.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]': finished 2026-03-10T13:27:43.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:27:43.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:27:43.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:43.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:27:43.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:27:43.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:43.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:43.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:27:43.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:42 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:44.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:43 vm00 ceph-mon[96293]: osdmap e104: 8 total, 8 up, 8 in 2026-03-10T13:27:44.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:43 vm00 ceph-mon[96293]: Health check update: Degraded data redundancy: 85/627 objects degraded (13.557%), 22 pgs degraded (PG_DEGRADED) 2026-03-10T13:27:44.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:43 vm00 ceph-mon[94470]: osdmap e104: 8 total, 8 up, 8 in 2026-03-10T13:27:44.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:43 vm00 ceph-mon[94470]: Health check update: Degraded data redundancy: 85/627 objects degraded (13.557%), 22 pgs degraded (PG_DEGRADED) 2026-03-10T13:27:44.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:43 vm08 ceph-mon[82639]: osdmap e104: 8 total, 8 up, 8 in 2026-03-10T13:27:44.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:43 vm08 ceph-mon[82639]: Health check update: Degraded data redundancy: 85/627 objects degraded (13.557%), 22 pgs degraded (PG_DEGRADED) 2026-03-10T13:27:45.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:44 vm08 ceph-mon[82639]: pgmap v35: 161 pgs: 41 active+undersized, 22 active+undersized+degraded, 98 
active+clean; 457 KiB data, 143 MiB used, 160 GiB / 160 GiB avail; 85/627 objects degraded (13.557%) 2026-03-10T13:27:45.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:44 vm00 ceph-mon[94470]: pgmap v35: 161 pgs: 41 active+undersized, 22 active+undersized+degraded, 98 active+clean; 457 KiB data, 143 MiB used, 160 GiB / 160 GiB avail; 85/627 objects degraded (13.557%) 2026-03-10T13:27:45.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:44 vm00 ceph-mon[96293]: pgmap v35: 161 pgs: 41 active+undersized, 22 active+undersized+degraded, 98 active+clean; 457 KiB data, 143 MiB used, 160 GiB / 160 GiB avail; 85/627 objects degraded (13.557%) 2026-03-10T13:27:46.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:45 vm00 ceph-mon[94470]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 85/627 objects degraded (13.557%), 22 pgs degraded) 2026-03-10T13:27:46.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:45 vm00 ceph-mon[94470]: Cluster is now healthy 2026-03-10T13:27:46.003 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:27:45 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:27:45] "GET /metrics HTTP/1.1" 200 37534 "" "Prometheus/2.51.0" 2026-03-10T13:27:46.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:45 vm00 ceph-mon[96293]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 85/627 objects degraded (13.557%), 22 pgs degraded) 2026-03-10T13:27:46.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:45 vm00 ceph-mon[96293]: Cluster is now healthy 2026-03-10T13:27:46.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:45 vm08 ceph-mon[82639]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 85/627 objects degraded (13.557%), 22 pgs degraded) 2026-03-10T13:27:46.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:45 vm08 ceph-mon[82639]: Cluster is now healthy 2026-03-10T13:27:47.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:46 vm00 ceph-mon[94470]: pgmap v36: 161 pgs: 161 active+clean; 457 KiB data, 144 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:27:47.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:46 vm00 ceph-mon[96293]: pgmap v36: 161 pgs: 161 active+clean; 457 KiB data, 144 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:27:47.252 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:27:46 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:27:46.993Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:27:47.253 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:27:46 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:27:46.993Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:27:47.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:46 vm08 ceph-mon[82639]: pgmap v36: 161 pgs: 161 active+clean; 457 KiB data, 144 MiB used, 160 GiB / 160 GiB avail; 853 
B/s rd, 0 op/s 2026-03-10T13:27:48.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:48 vm00 ceph-mon[96293]: pgmap v37: 161 pgs: 161 active+clean; 457 KiB data, 144 MiB used, 160 GiB / 160 GiB avail; 672 B/s rd, 0 op/s 2026-03-10T13:27:48.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:48 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:48.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:48 vm00 ceph-mon[94470]: pgmap v37: 161 pgs: 161 active+clean; 457 KiB data, 144 MiB used, 160 GiB / 160 GiB avail; 672 B/s rd, 0 op/s 2026-03-10T13:27:48.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:48 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:48.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:48 vm08 ceph-mon[82639]: pgmap v37: 161 pgs: 161 active+clean; 457 KiB data, 144 MiB used, 160 GiB / 160 GiB avail; 672 B/s rd, 0 op/s 2026-03-10T13:27:48.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:48 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:50.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:50 vm00 ceph-mon[94470]: pgmap v38: 161 pgs: 161 active+clean; 457 KiB data, 144 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:27:50.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:50 vm00 ceph-mon[96293]: pgmap v38: 161 pgs: 161 active+clean; 457 KiB data, 144 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:27:50.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:50 vm08 ceph-mon[82639]: pgmap v38: 161 pgs: 161 active+clean; 457 KiB data, 144 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:27:51.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:51 vm00 ceph-mon[94470]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:27:51.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:51 vm00 ceph-mon[96293]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:27:51.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:51 vm08 ceph-mon[82639]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:27:52.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:52 vm00 ceph-mon[94470]: pgmap v39: 161 pgs: 161 active+clean; 457 KiB data, 144 MiB used, 160 GiB / 160 GiB avail; 1.0 KiB/s rd, 1 op/s 2026-03-10T13:27:52.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:52 vm00 ceph-mon[96293]: pgmap v39: 161 pgs: 161 active+clean; 457 KiB data, 144 MiB used, 160 GiB / 160 GiB avail; 1.0 KiB/s rd, 1 op/s 2026-03-10T13:27:52.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:52 vm08 ceph-mon[82639]: pgmap v39: 161 pgs: 161 active+clean; 457 KiB data, 144 MiB used, 160 GiB / 160 GiB avail; 1.0 KiB/s rd, 1 op/s 2026-03-10T13:27:54.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:54 vm00 ceph-mon[94470]: pgmap v40: 161 pgs: 161 active+clean; 457 KiB data, 144 MiB used, 160 GiB / 160 GiB avail; 969 B/s rd, 0 op/s 2026-03-10T13:27:54.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:54 vm00 ceph-mon[96293]: pgmap v40: 161 pgs: 161 active+clean; 457 KiB data, 144 MiB used, 160 GiB / 160 GiB avail; 969 B/s rd, 0 op/s 2026-03-10T13:27:54.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:54 vm08 ceph-mon[82639]: pgmap v40: 161 pgs: 161 
active+clean; 457 KiB data, 144 MiB used, 160 GiB / 160 GiB avail; 969 B/s rd, 0 op/s 2026-03-10T13:27:56.002 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:27:55 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:27:55] "GET /metrics HTTP/1.1" 200 37534 "" "Prometheus/2.51.0" 2026-03-10T13:27:56.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:56 vm00 ceph-mon[94470]: pgmap v41: 161 pgs: 161 active+clean; 457 KiB data, 144 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:27:56.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:56 vm00 ceph-mon[96293]: pgmap v41: 161 pgs: 161 active+clean; 457 KiB data, 144 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:27:56.770 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:27:56 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=infra.usagestats t=2026-03-10T13:27:56.463322463Z level=info msg="Usage stats are ready to report" 2026-03-10T13:27:56.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:56 vm08 ceph-mon[82639]: pgmap v41: 161 pgs: 161 active+clean; 457 KiB data, 144 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:27:57.252 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:27:56 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:27:56.993Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:27:57.252 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:27:56 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:27:56.994Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:27:58.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:58 vm00 ceph-mon[94470]: pgmap v42: 161 pgs: 161 active+clean; 457 KiB data, 144 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:27:58.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:58 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:58.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:58 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:27:58.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:58 vm00 ceph-mon[96293]: pgmap v42: 161 pgs: 161 active+clean; 457 KiB data, 144 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:27:58.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:58 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:58.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:58 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:27:58.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:58 vm08 ceph-mon[82639]: pgmap v42: 161 pgs: 161 active+clean; 457 KiB data, 144 MiB used, 160 GiB / 160 
GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:27:58.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:58 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:27:58.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:58 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:27:59.155 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch ps' 2026-03-10T13:27:59.357 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:27:59 vm00 ceph-mon[96293]: from='client.54151 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:27:59.358 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:27:59 vm00 ceph-mon[94470]: from='client.54151 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:27:59.643 INFO:teuthology.orchestra.run.vm00.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T13:27:59.643 INFO:teuthology.orchestra.run.vm00.stdout:alertmanager.a vm00 *:9093,9094 running (14m) 19s ago 21m 26.4M - 0.25.0 c8568f914cd2 12fde3cf83cb 2026-03-10T13:27:59.643 INFO:teuthology.orchestra.run.vm00.stdout:grafana.a vm08 *:3000 running (2m) 60s ago 20m 74.2M - 10.4.0 c8b91775d855 960e32589e98 2026-03-10T13:27:59.643 INFO:teuthology.orchestra.run.vm00.stdout:iscsi.foo.vm00.dezodo vm00 running (2m) 19s ago 20m 49.1M - 3.5 e1d6a67b021e 630bf6d4e7f3 2026-03-10T13:27:59.643 INFO:teuthology.orchestra.run.vm00.stdout:mgr.x vm08 *:8443,9283,8765 running (2m) 60s ago 22m 486M - 19.2.3-678-ge911bdeb 654f31e6858e 31b91eebc856 2026-03-10T13:27:59.643 INFO:teuthology.orchestra.run.vm00.stdout:mgr.y vm00 *:8443,9283,8765 running (9m) 19s ago 23m 550M - 19.2.3-678-ge911bdeb 654f31e6858e 5bc576d4d32b 2026-03-10T13:27:59.644 INFO:teuthology.orchestra.run.vm00.stdout:mon.a vm00 running (66s) 19s ago 23m 45.5M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 981df6371890 2026-03-10T13:27:59.644 INFO:teuthology.orchestra.run.vm00.stdout:mon.b vm08 running (101s) 60s ago 22m 37.6M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 8cceb678a9ee 2026-03-10T13:27:59.644 INFO:teuthology.orchestra.run.vm00.stdout:mon.c vm00 running (57s) 19s ago 22m 40.5M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 43deda66dee3 2026-03-10T13:27:59.644 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.a vm00 *:9100 running (14m) 19s ago 21m 10.9M - 1.7.0 72c9c2088986 bcf883401619 2026-03-10T13:27:59.644 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.b vm08 *:9100 running (14m) 60s ago 21m 10.0M - 1.7.0 72c9c2088986 4ac83f03f818 2026-03-10T13:27:59.644 INFO:teuthology.orchestra.run.vm00.stdout:osd.0 vm00 running (28s) 19s ago 22m 44.5M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 5fc74f4d2179 2026-03-10T13:27:59.644 INFO:teuthology.orchestra.run.vm00.stdout:osd.1 vm00 running (21s) 19s ago 22m 13.2M 4096M 19.2.3-678-ge911bdeb 654f31e6858e dc65e199e9eb 2026-03-10T13:27:59.644 INFO:teuthology.orchestra.run.vm00.stdout:osd.2 vm00 running (22m) 19s ago 22m 57.1M 4096M 17.2.0 e1d6a67b021e 1e417e82c2b9 2026-03-10T13:27:59.644 INFO:teuthology.orchestra.run.vm00.stdout:osd.3 vm00 running (22m) 19s ago 22m 55.1M 4096M 17.2.0 e1d6a67b021e 98bb6d7ea69f 
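The orch ps listing above captures the staggered state mid-run: the mons, both mgrs, osd.0 and osd.1 are already on 19.2.3-678-ge911bdeb, while the remaining OSDs, the RGW daemons and the iscsi gateway are still on 17.2.0. The assertion the harness runs next (shown verbatim in the following cephadm shell invocation) checks exactly that split; as a standalone command it is simply:

  # Two distinct OSD versions are expected at this point in the staggered
  # upgrade: 17.2.0 plus the 19.2.3 target build. jq -e makes the shell
  # exit status reflect the boolean, which is what the test keys off.
  ceph versions | jq -e '.osd | length == 2'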
2026-03-10T13:27:59.644 INFO:teuthology.orchestra.run.vm00.stdout:osd.4 vm08 running (21m) 60s ago 21m 54.7M 4096M 17.2.0 e1d6a67b021e e349440ca776 2026-03-10T13:27:59.644 INFO:teuthology.orchestra.run.vm00.stdout:osd.5 vm08 running (21m) 60s ago 21m 56.5M 4096M 17.2.0 e1d6a67b021e 9a6a2a67cc99 2026-03-10T13:27:59.644 INFO:teuthology.orchestra.run.vm00.stdout:osd.6 vm08 running (21m) 60s ago 21m 52.2M 4096M 17.2.0 e1d6a67b021e c27676916d52 2026-03-10T13:27:59.644 INFO:teuthology.orchestra.run.vm00.stdout:osd.7 vm08 running (21m) 60s ago 21m 54.0M 4096M 17.2.0 e1d6a67b021e bf67951990a5 2026-03-10T13:27:59.644 INFO:teuthology.orchestra.run.vm00.stdout:prometheus.a vm08 *:9095 running (2m) 60s ago 21m 48.2M - 2.51.0 1d3b7f56885b 3f9b2d0821c9 2026-03-10T13:27:59.644 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm00.tvlvzo vm00 *:8000 running (20m) 19s ago 20m 97.7M - 17.2.0 e1d6a67b021e be24eac16807 2026-03-10T13:27:59.644 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm08.ljayps vm08 *:8000 running (20m) 60s ago 20m 94.3M - 17.2.0 e1d6a67b021e 67b525427823 2026-03-10T13:27:59.688 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph versions | jq -e '"'"'.osd | length == 2'"'"'' 2026-03-10T13:27:59.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:27:59 vm08 ceph-mon[82639]: from='client.54151 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:28:00.207 INFO:teuthology.orchestra.run.vm00.stdout:true 2026-03-10T13:28:00.260 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch upgrade check quay.ceph.io/ceph-ci/ceph:$sha1 | jq -e '"'"'.up_to_date | length == 7'"'"'' 2026-03-10T13:28:00.462 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:00 vm00 ceph-mon[94470]: pgmap v43: 161 pgs: 161 active+clean; 457 KiB data, 144 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:28:00.462 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:00 vm00 ceph-mon[94470]: from='client.44205 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:28:00.462 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:00 vm00 ceph-mon[94470]: from='client.? 192.168.123.100:0/819208963' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:28:00.462 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:00 vm00 ceph-mon[96293]: pgmap v43: 161 pgs: 161 active+clean; 457 KiB data, 144 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:28:00.462 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:00 vm00 ceph-mon[96293]: from='client.44205 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:28:00.462 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:00 vm00 ceph-mon[96293]: from='client.? 
192.168.123.100:0/819208963' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:28:00.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:00 vm08 ceph-mon[82639]: pgmap v43: 161 pgs: 161 active+clean; 457 KiB data, 144 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:28:00.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:00 vm08 ceph-mon[82639]: from='client.44205 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:28:00.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:00 vm08 ceph-mon[82639]: from='client.? 192.168.123.100:0/819208963' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:28:01.021 INFO:teuthology.orchestra.run.vm00.stdout:true 2026-03-10T13:28:01.064 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch upgrade status' 2026-03-10T13:28:01.432 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:01 vm00 ceph-mon[94470]: from='client.34250 -' entity='client.admin' cmd=[{"prefix": "orch upgrade check", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:28:01.432 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:01 vm00 ceph-mon[94470]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:28:01.432 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:01 vm00 ceph-mon[96293]: from='client.34250 -' entity='client.admin' cmd=[{"prefix": "orch upgrade check", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:28:01.432 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:01 vm00 ceph-mon[96293]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:28:01.589 INFO:teuthology.orchestra.run.vm00.stdout:{ 2026-03-10T13:28:01.589 INFO:teuthology.orchestra.run.vm00.stdout: "target_image": null, 2026-03-10T13:28:01.589 INFO:teuthology.orchestra.run.vm00.stdout: "in_progress": false, 2026-03-10T13:28:01.589 INFO:teuthology.orchestra.run.vm00.stdout: "which": "", 2026-03-10T13:28:01.589 INFO:teuthology.orchestra.run.vm00.stdout: "services_complete": [], 2026-03-10T13:28:01.589 INFO:teuthology.orchestra.run.vm00.stdout: "progress": null, 2026-03-10T13:28:01.589 INFO:teuthology.orchestra.run.vm00.stdout: "message": "", 2026-03-10T13:28:01.589 INFO:teuthology.orchestra.run.vm00.stdout: "is_paused": false 2026-03-10T13:28:01.589 INFO:teuthology.orchestra.run.vm00.stdout:} 2026-03-10T13:28:01.640 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph health detail' 2026-03-10T13:28:01.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:01 vm08 ceph-mon[82639]: from='client.34250 -' entity='client.admin' cmd=[{"prefix": "orch upgrade check", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:28:01.770 
INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:01 vm08 ceph-mon[82639]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:28:02.142 INFO:teuthology.orchestra.run.vm00.stdout:HEALTH_OK 2026-03-10T13:28:02.189 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types crash,osd --limit 1' 2026-03-10T13:28:02.397 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:02 vm00 ceph-mon[96293]: pgmap v44: 161 pgs: 161 active+clean; 457 KiB data, 144 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:28:02.397 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:02 vm00 ceph-mon[96293]: from='client.44220 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:28:02.397 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:02 vm00 ceph-mon[96293]: from='client.? 192.168.123.100:0/3374316740' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T13:28:02.398 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:02 vm00 ceph-mon[94470]: pgmap v44: 161 pgs: 161 active+clean; 457 KiB data, 144 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:28:02.398 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:02 vm00 ceph-mon[94470]: from='client.44220 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:28:02.398 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:02 vm00 ceph-mon[94470]: from='client.? 192.168.123.100:0/3374316740' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T13:28:02.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:02 vm08 ceph-mon[82639]: pgmap v44: 161 pgs: 161 active+clean; 457 KiB data, 144 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:28:02.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:02 vm08 ceph-mon[82639]: from='client.44220 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:28:02.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:02 vm08 ceph-mon[82639]: from='client.? 192.168.123.100:0/3374316740' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T13:28:02.971 INFO:teuthology.orchestra.run.vm00.stdout:Initiating upgrade to quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:28:03.019 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'while ceph orch upgrade status | jq '"'"'.in_progress'"'"' | grep true && ! 
ceph orch upgrade status | jq '"'"'.message'"'"' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done' 2026-03-10T13:28:03.747 INFO:teuthology.orchestra.run.vm00.stdout:true 2026-03-10T13:28:03.999 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:03 vm00 ceph-mon[94470]: from='client.44232 -' entity='client.admin' cmd=[{"prefix": "orch upgrade start", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "daemon_types": "crash,osd", "limit": 1, "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:28:03.999 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:03 vm00 ceph-mon[94470]: Upgrade: Started with target quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:28:03.999 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:03 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:03.999 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:03 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:28:03.999 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:03 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:28:03.999 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:03 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:03.999 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:03 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:03.999 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:03 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:28:03.999 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:03 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:28:03.999 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:03 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:03.999 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:03 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:28:03.999 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:03 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:03.999 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:03 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:28:03.999 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:03 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:03.999 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:03 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["2"], "max": 16}]: dispatch 2026-03-10T13:28:03.999 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:03 vm00 ceph-mon[96293]: from='client.44232 -' entity='client.admin' cmd=[{"prefix": "orch upgrade start", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "daemon_types": "crash,osd", "limit": 1, "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:28:03.999 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:03 vm00 ceph-mon[96293]: Upgrade: Started with target 
quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:28:03.999 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:03 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:03.999 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:03 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:28:03.999 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:03 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:28:03.999 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:03 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:03.999 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:03 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:03.999 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:03 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:28:03.999 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:03 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:28:03.999 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:03 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:03.999 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:03 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:28:03.999 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:03 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:03.999 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:03 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:28:03.999 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:03 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:03.999 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:03 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["2"], "max": 16}]: dispatch 2026-03-10T13:28:04.240 INFO:teuthology.orchestra.run.vm00.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T13:28:04.240 INFO:teuthology.orchestra.run.vm00.stdout:alertmanager.a vm00 *:9093,9094 running (14m) 24s ago 21m 26.4M - 0.25.0 c8568f914cd2 12fde3cf83cb 2026-03-10T13:28:04.240 INFO:teuthology.orchestra.run.vm00.stdout:grafana.a vm08 *:3000 running (2m) 65s ago 20m 74.2M - 10.4.0 c8b91775d855 960e32589e98 2026-03-10T13:28:04.240 INFO:teuthology.orchestra.run.vm00.stdout:iscsi.foo.vm00.dezodo vm00 running (2m) 24s ago 20m 49.1M - 3.5 e1d6a67b021e 630bf6d4e7f3 2026-03-10T13:28:04.240 INFO:teuthology.orchestra.run.vm00.stdout:mgr.x vm08 *:8443,9283,8765 running (2m) 65s ago 22m 486M - 19.2.3-678-ge911bdeb 654f31e6858e 31b91eebc856 2026-03-10T13:28:04.240 INFO:teuthology.orchestra.run.vm00.stdout:mgr.y vm00 *:8443,9283,8765 running (9m) 24s ago 23m 550M - 19.2.3-678-ge911bdeb 654f31e6858e 5bc576d4d32b 2026-03-10T13:28:04.240 INFO:teuthology.orchestra.run.vm00.stdout:mon.a vm00 running (70s) 24s ago 23m 45.5M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 981df6371890 2026-03-10T13:28:04.241 
INFO:teuthology.orchestra.run.vm00.stdout:mon.b vm08 running (106s) 65s ago 22m 37.6M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 8cceb678a9ee 2026-03-10T13:28:04.241 INFO:teuthology.orchestra.run.vm00.stdout:mon.c vm00 running (62s) 24s ago 23m 40.5M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 43deda66dee3 2026-03-10T13:28:04.241 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.a vm00 *:9100 running (14m) 24s ago 21m 10.9M - 1.7.0 72c9c2088986 bcf883401619 2026-03-10T13:28:04.241 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.b vm08 *:9100 running (14m) 65s ago 21m 10.0M - 1.7.0 72c9c2088986 4ac83f03f818 2026-03-10T13:28:04.241 INFO:teuthology.orchestra.run.vm00.stdout:osd.0 vm00 running (32s) 24s ago 22m 44.5M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 5fc74f4d2179 2026-03-10T13:28:04.241 INFO:teuthology.orchestra.run.vm00.stdout:osd.1 vm00 running (25s) 24s ago 22m 13.2M 4096M 19.2.3-678-ge911bdeb 654f31e6858e dc65e199e9eb 2026-03-10T13:28:04.241 INFO:teuthology.orchestra.run.vm00.stdout:osd.2 vm00 running (22m) 24s ago 22m 57.1M 4096M 17.2.0 e1d6a67b021e 1e417e82c2b9 2026-03-10T13:28:04.241 INFO:teuthology.orchestra.run.vm00.stdout:osd.3 vm00 running (22m) 24s ago 22m 55.1M 4096M 17.2.0 e1d6a67b021e 98bb6d7ea69f 2026-03-10T13:28:04.241 INFO:teuthology.orchestra.run.vm00.stdout:osd.4 vm08 running (21m) 65s ago 21m 54.7M 4096M 17.2.0 e1d6a67b021e e349440ca776 2026-03-10T13:28:04.241 INFO:teuthology.orchestra.run.vm00.stdout:osd.5 vm08 running (21m) 65s ago 21m 56.5M 4096M 17.2.0 e1d6a67b021e 9a6a2a67cc99 2026-03-10T13:28:04.241 INFO:teuthology.orchestra.run.vm00.stdout:osd.6 vm08 running (21m) 65s ago 21m 52.2M 4096M 17.2.0 e1d6a67b021e c27676916d52 2026-03-10T13:28:04.241 INFO:teuthology.orchestra.run.vm00.stdout:osd.7 vm08 running (21m) 65s ago 21m 54.0M 4096M 17.2.0 e1d6a67b021e bf67951990a5 2026-03-10T13:28:04.241 INFO:teuthology.orchestra.run.vm00.stdout:prometheus.a vm08 *:9095 running (2m) 65s ago 21m 48.2M - 2.51.0 1d3b7f56885b 3f9b2d0821c9 2026-03-10T13:28:04.241 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm00.tvlvzo vm00 *:8000 running (20m) 24s ago 20m 97.7M - 17.2.0 e1d6a67b021e be24eac16807 2026-03-10T13:28:04.241 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm08.ljayps vm08 *:8000 running (20m) 65s ago 20m 94.3M - 17.2.0 e1d6a67b021e 67b525427823 2026-03-10T13:28:04.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:03 vm08 ceph-mon[82639]: from='client.44232 -' entity='client.admin' cmd=[{"prefix": "orch upgrade start", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "daemon_types": "crash,osd", "limit": 1, "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:28:04.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:03 vm08 ceph-mon[82639]: Upgrade: Started with target quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:28:04.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:03 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:04.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:03 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:28:04.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:03 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:28:04.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:03 vm08 ceph-mon[82639]: 
from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:04.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:03 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:04.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:03 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:28:04.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:03 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:28:04.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:03 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:04.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:03 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:28:04.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:03 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:04.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:03 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:28:04.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:03 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:04.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:03 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["2"], "max": 16}]: dispatch 2026-03-10T13:28:04.543 INFO:teuthology.orchestra.run.vm00.stdout:{ 2026-03-10T13:28:04.543 INFO:teuthology.orchestra.run.vm00.stdout: "mon": { 2026-03-10T13:28:04.543 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 3 2026-03-10T13:28:04.543 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T13:28:04.543 INFO:teuthology.orchestra.run.vm00.stdout: "mgr": { 2026-03-10T13:28:04.543 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-10T13:28:04.543 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T13:28:04.543 INFO:teuthology.orchestra.run.vm00.stdout: "osd": { 2026-03-10T13:28:04.543 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 6, 2026-03-10T13:28:04.543 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-10T13:28:04.543 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T13:28:04.543 INFO:teuthology.orchestra.run.vm00.stdout: "rgw": { 2026-03-10T13:28:04.543 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 2 2026-03-10T13:28:04.543 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T13:28:04.543 INFO:teuthology.orchestra.run.vm00.stdout: "overall": { 2026-03-10T13:28:04.543 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 8, 2026-03-10T13:28:04.543 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 7 2026-03-10T13:28:04.543 INFO:teuthology.orchestra.run.vm00.stdout: } 
2026-03-10T13:28:04.543 INFO:teuthology.orchestra.run.vm00.stdout:} 2026-03-10T13:28:04.811 INFO:teuthology.orchestra.run.vm00.stdout:{ 2026-03-10T13:28:04.811 INFO:teuthology.orchestra.run.vm00.stdout: "target_image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", 2026-03-10T13:28:04.811 INFO:teuthology.orchestra.run.vm00.stdout: "in_progress": true, 2026-03-10T13:28:04.811 INFO:teuthology.orchestra.run.vm00.stdout: "which": "Upgrading daemons of type(s) crash,osd. Upgrade limited to 1 daemons (1 remaining).", 2026-03-10T13:28:04.811 INFO:teuthology.orchestra.run.vm00.stdout: "services_complete": [], 2026-03-10T13:28:04.811 INFO:teuthology.orchestra.run.vm00.stdout: "progress": "2/8 daemons upgraded", 2026-03-10T13:28:04.811 INFO:teuthology.orchestra.run.vm00.stdout: "message": "Currently upgrading osd daemons", 2026-03-10T13:28:04.811 INFO:teuthology.orchestra.run.vm00.stdout: "is_paused": false 2026-03-10T13:28:04.811 INFO:teuthology.orchestra.run.vm00.stdout:} 2026-03-10T13:28:04.926 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 13:28:04 vm00 systemd[1]: Stopping Ceph osd.2 for 98a3dada-1c81-11f1-89c9-d57c120f78d5... 2026-03-10T13:28:05.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:04 vm00 ceph-mon[94470]: pgmap v45: 161 pgs: 161 active+clean; 457 KiB data, 144 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:28:05.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:04 vm00 ceph-mon[94470]: Upgrade: First pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:28:05.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:04 vm00 ceph-mon[94470]: from='client.44238 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:28:05.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:04 vm00 ceph-mon[94470]: Upgrade: Target is version 19.2.3-678-ge911bdeb (squid) 2026-03-10T13:28:05.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:04 vm00 ceph-mon[94470]: Upgrade: Target container is quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, digests ['quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc'] 2026-03-10T13:28:05.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:04 vm00 ceph-mon[94470]: Upgrade: Setting container_image for all mgr 2026-03-10T13:28:05.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:04 vm00 ceph-mon[94470]: Upgrade: Setting container_image for all mon 2026-03-10T13:28:05.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:04 vm00 ceph-mon[94470]: Upgrade: Setting container_image for all crash 2026-03-10T13:28:05.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:04 vm00 ceph-mon[94470]: from='mon.2 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["2"], "max": 16}]: dispatch 2026-03-10T13:28:05.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:04 vm00 ceph-mon[94470]: Upgrade: osd.2 is safe to restart 2026-03-10T13:28:05.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:04 vm00 ceph-mon[94470]: from='client.54172 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:28:05.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:04 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:05.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:04 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.2"}]: dispatch 2026-03-10T13:28:05.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:04 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:28:05.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:04 vm00 ceph-mon[94470]: from='client.? 192.168.123.100:0/1439323407' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:28:05.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:04 vm00 ceph-mon[96293]: pgmap v45: 161 pgs: 161 active+clean; 457 KiB data, 144 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:28:05.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:04 vm00 ceph-mon[96293]: Upgrade: First pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:28:05.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:04 vm00 ceph-mon[96293]: from='client.44238 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:28:05.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:04 vm00 ceph-mon[96293]: Upgrade: Target is version 19.2.3-678-ge911bdeb (squid) 2026-03-10T13:28:05.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:04 vm00 ceph-mon[96293]: Upgrade: Target container is quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, digests ['quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc'] 2026-03-10T13:28:05.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:04 vm00 ceph-mon[96293]: Upgrade: Setting container_image for all mgr 2026-03-10T13:28:05.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:04 vm00 ceph-mon[96293]: Upgrade: Setting container_image for all mon 2026-03-10T13:28:05.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:04 vm00 ceph-mon[96293]: Upgrade: Setting container_image for all crash 2026-03-10T13:28:05.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:04 vm00 ceph-mon[96293]: from='mon.2 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["2"], "max": 16}]: dispatch 2026-03-10T13:28:05.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:04 vm00 ceph-mon[96293]: Upgrade: osd.2 is safe to restart 2026-03-10T13:28:05.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:04 vm00 ceph-mon[96293]: from='client.54172 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:28:05.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:04 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:05.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:04 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.2"}]: dispatch 2026-03-10T13:28:05.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:04 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:28:05.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:04 vm00 ceph-mon[96293]: from='client.? 192.168.123.100:0/1439323407' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:28:05.254 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 13:28:05 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-2[60179]: 2026-03-10T13:28:04.998+0000 7fb3cf4ba700 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-10T13:28:05.254 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 13:28:05 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-2[60179]: 2026-03-10T13:28:04.998+0000 7fb3cf4ba700 -1 osd.2 104 *** Got signal Terminated *** 2026-03-10T13:28:05.254 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 13:28:05 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-2[60179]: 2026-03-10T13:28:04.998+0000 7fb3cf4ba700 -1 osd.2 104 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-10T13:28:05.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:04 vm08 ceph-mon[82639]: pgmap v45: 161 pgs: 161 active+clean; 457 KiB data, 144 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:28:05.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:04 vm08 ceph-mon[82639]: Upgrade: First pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:28:05.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:04 vm08 ceph-mon[82639]: from='client.44238 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:28:05.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:04 vm08 ceph-mon[82639]: Upgrade: Target is version 19.2.3-678-ge911bdeb (squid) 2026-03-10T13:28:05.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:04 vm08 ceph-mon[82639]: Upgrade: Target container is quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, digests ['quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc'] 2026-03-10T13:28:05.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:04 vm08 ceph-mon[82639]: Upgrade: Setting container_image for all mgr 2026-03-10T13:28:05.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:04 vm08 ceph-mon[82639]: Upgrade: Setting container_image for all mon 2026-03-10T13:28:05.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:04 vm08 
ceph-mon[82639]: Upgrade: Setting container_image for all crash 2026-03-10T13:28:05.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:04 vm08 ceph-mon[82639]: from='mon.2 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["2"], "max": 16}]: dispatch 2026-03-10T13:28:05.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:04 vm08 ceph-mon[82639]: Upgrade: osd.2 is safe to restart 2026-03-10T13:28:05.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:04 vm08 ceph-mon[82639]: from='client.54172 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:28:05.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:04 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:05.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:04 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.2"}]: dispatch 2026-03-10T13:28:05.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:04 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:28:05.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:04 vm08 ceph-mon[82639]: from='client.? 192.168.123.100:0/1439323407' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:28:05.980 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:05 vm00 ceph-mon[94470]: Upgrade: Updating osd.2 2026-03-10T13:28:05.980 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:28:05 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:28:05] "GET /metrics HTTP/1.1" 200 37673 "" "Prometheus/2.51.0" 2026-03-10T13:28:06.233 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:05 vm00 ceph-mon[94470]: Deploying daemon osd.2 on vm00 2026-03-10T13:28:06.233 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:05 vm00 ceph-mon[94470]: from='client.54178 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:28:06.233 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:05 vm00 ceph-mon[94470]: from='client.54184 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:28:06.233 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:05 vm00 ceph-mon[94470]: osd.2 marked itself down and dead 2026-03-10T13:28:06.233 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 13:28:06 vm00 podman[110892]: 2026-03-10 13:28:06.035609509 +0000 UTC m=+1.051693993 container died 1e417e82c2b9515bc82468c5240bc0880cacab911264ee4c5febc1435902292e (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-2, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, io.openshift.expose-services=, release=754, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, io.openshift.tags=base centos centos-stream, maintainer=Guillaume Abrioux , url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, GIT_BRANCH=HEAD, build-date=2022-05-03T08:36:31.336870, distribution-scope=public, io.k8s.display-name=CentOS Stream 8, vendor=Red Hat, Inc., CEPH_POINT_RELEASE=-17.2.0, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. 
This image takes the Red Hat UBI and layers on content from CentOS Stream, name=centos-stream, GIT_REPO=https://github.com/ceph/ceph-container.git, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, vcs-type=git, GIT_CLEAN=True, architecture=x86_64, ceph=True, io.buildah.version=1.19.8, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, version=8, RELEASE=HEAD, com.redhat.component=centos-stream-container) 2026-03-10T13:28:06.233 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 13:28:06 vm00 podman[110892]: 2026-03-10 13:28:06.068615746 +0000 UTC m=+1.084700230 container remove 1e417e82c2b9515bc82468c5240bc0880cacab911264ee4c5febc1435902292e (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-2, architecture=x86_64, io.openshift.tags=base centos centos-stream, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, GIT_BRANCH=HEAD, GIT_REPO=https://github.com/ceph/ceph-container.git, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, RELEASE=HEAD, vcs-type=git, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, build-date=2022-05-03T08:36:31.336870, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. 
This image takes the Red Hat UBI and layers on content from CentOS Stream, name=centos-stream, distribution-scope=public, release=754, io.k8s.display-name=CentOS Stream 8, maintainer=Guillaume Abrioux , io.buildah.version=1.19.8, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, vendor=Red Hat, Inc., ceph=True, io.openshift.expose-services=, version=8, CEPH_POINT_RELEASE=-17.2.0, GIT_CLEAN=True, com.redhat.component=centos-stream-container) 2026-03-10T13:28:06.233 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 13:28:06 vm00 bash[110892]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-2 2026-03-10T13:28:06.233 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 13:28:06 vm00 podman[110958]: 2026-03-10 13:28:06.201646187 +0000 UTC m=+0.016366891 container create aa7e46ed00de407611e1f0b036e2271d8166c625eb50643a8881ddef2b411046 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-2-deactivate, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, org.opencontainers.image.documentation=https://docs.ceph.com/) 2026-03-10T13:28:06.234 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:05 vm00 ceph-mon[96293]: Upgrade: Updating osd.2 2026-03-10T13:28:06.234 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:05 vm00 ceph-mon[96293]: Deploying daemon osd.2 on vm00 2026-03-10T13:28:06.234 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:05 vm00 ceph-mon[96293]: from='client.54178 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:28:06.234 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:05 vm00 ceph-mon[96293]: from='client.54184 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:28:06.234 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:05 vm00 ceph-mon[96293]: osd.2 marked itself down and dead 2026-03-10T13:28:06.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:05 vm08 ceph-mon[82639]: Upgrade: Updating osd.2 2026-03-10T13:28:06.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:05 vm08 ceph-mon[82639]: Deploying daemon osd.2 on vm00 2026-03-10T13:28:06.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:05 vm08 ceph-mon[82639]: from='client.54178 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:28:06.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:05 vm08 ceph-mon[82639]: from='client.54184 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:28:06.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:05 vm08 ceph-mon[82639]: osd.2 marked itself down and dead 2026-03-10T13:28:06.503 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 13:28:06 vm00 podman[110958]: 2026-03-10 13:28:06.244111481 +0000 UTC m=+0.058832195 container init aa7e46ed00de407611e1f0b036e2271d8166c625eb50643a8881ddef2b411046 
(image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-2-deactivate, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, ceph=True, org.label-schema.build-date=20260223) 2026-03-10T13:28:06.503 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 13:28:06 vm00 podman[110958]: 2026-03-10 13:28:06.249087786 +0000 UTC m=+0.063808480 container start aa7e46ed00de407611e1f0b036e2271d8166c625eb50643a8881ddef2b411046 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-2-deactivate, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, CEPH_REF=squid, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9) 2026-03-10T13:28:06.503 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 13:28:06 vm00 podman[110958]: 2026-03-10 13:28:06.249961182 +0000 UTC m=+0.064681886 container attach aa7e46ed00de407611e1f0b036e2271d8166c625eb50643a8881ddef2b411046 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-2-deactivate, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, ceph=True, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.label-schema.build-date=20260223, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team ) 2026-03-10T13:28:06.503 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 13:28:06 vm00 podman[110958]: 2026-03-10 13:28:06.194737383 +0000 UTC m=+0.009458097 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:28:06.503 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 13:28:06 vm00 podman[110977]: 2026-03-10 13:28:06.391249621 +0000 UTC m=+0.010609141 container died aa7e46ed00de407611e1f0b036e2271d8166c625eb50643a8881ddef2b411046 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-2-deactivate, org.label-schema.license=GPLv2, org.label-schema.build-date=20260223, 
CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , io.buildah.version=1.41.3) 2026-03-10T13:28:06.503 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 13:28:06 vm00 podman[110977]: 2026-03-10 13:28:06.406102134 +0000 UTC m=+0.025461654 container remove aa7e46ed00de407611e1f0b036e2271d8166c625eb50643a8881ddef2b411046 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-2-deactivate, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_REF=squid, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS) 2026-03-10T13:28:06.503 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 13:28:06 vm00 systemd[1]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@osd.2.service: Deactivated successfully. 2026-03-10T13:28:06.503 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 13:28:06 vm00 systemd[1]: Stopped Ceph osd.2 for 98a3dada-1c81-11f1-89c9-d57c120f78d5. 2026-03-10T13:28:06.503 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 13:28:06 vm00 systemd[1]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@osd.2.service: Consumed 11.742s CPU time. 2026-03-10T13:28:06.983 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 13:28:06 vm00 systemd[1]: Starting Ceph osd.2 for 98a3dada-1c81-11f1-89c9-d57c120f78d5... 
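
The entries above show the gate applied before osd.2 is touched: the mons dispatch an `osd ok-to-stop` check for that id (with the batch limit `"max": 16` visible in the command), log "Upgrade: osd.2 is safe to restart", and only then is the old 17.2.0 container terminated and removed, a short `-deactivate` container run on the target image, and the systemd unit restarted for redeployment. A minimal sketch of reproducing that same gate from a script, assuming only that the `ceph` CLI and an admin keyring are available on the host (the helper name and the reliance on the command's exit status are illustrative, not cephadm's actual implementation):

#!/usr/bin/env python3
# Sketch: refuse to restart an OSD unless the cluster says it is ok to stop,
# mirroring the check dispatched above ({"prefix": "osd ok-to-stop", "ids": ["2"], ...}).
# Assumes the `ceph` CLI and an admin keyring are available on this host.
import subprocess
import sys

def osd_ok_to_stop(osd_id: int) -> bool:
    """Return True when the mons report the OSD can be stopped safely.

    `ceph osd ok-to-stop` exits non-zero (and explains why) when stopping the
    OSD would leave placement groups without enough up replicas.
    """
    result = subprocess.run(
        ["ceph", "osd", "ok-to-stop", str(osd_id)],
        capture_output=True, text=True,
    )
    if result.returncode != 0:
        print(result.stderr.strip() or result.stdout.strip(), file=sys.stderr)
    return result.returncode == 0

if __name__ == "__main__":
    osd = int(sys.argv[1]) if len(sys.argv) > 1 else 2
    if osd_ok_to_stop(osd):
        print(f"osd.{osd} is safe to restart")  # same wording the mons log above
    else:
        sys.exit(f"refusing to restart osd.{osd}: not ok-to-stop")

Relying on the exit status rather than parsing the report keeps the sketch independent of the exact output format, which has varied across Ceph releases.
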
2026-03-10T13:28:06.984 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 13:28:06 vm00 podman[111062]: 2026-03-10 13:28:06.697879316 +0000 UTC m=+0.022788440 container create ca74b072377794b543d0d01099963e68bfddd29f9d37780cbdad27b66156716c (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-2-activate, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-10T13:28:06.984 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 13:28:06 vm00 podman[111062]: 2026-03-10 13:28:06.738143581 +0000 UTC m=+0.063052714 container init ca74b072377794b543d0d01099963e68bfddd29f9d37780cbdad27b66156716c (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-2-activate, io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid, org.label-schema.vendor=CentOS, org.label-schema.build-date=20260223, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team ) 2026-03-10T13:28:06.984 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 13:28:06 vm00 podman[111062]: 2026-03-10 13:28:06.741654662 +0000 UTC m=+0.066563786 container start ca74b072377794b543d0d01099963e68bfddd29f9d37780cbdad27b66156716c (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-2-activate, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, ceph=True, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid) 2026-03-10T13:28:06.984 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 13:28:06 vm00 podman[111062]: 2026-03-10 13:28:06.742639337 +0000 UTC m=+0.067548461 container attach ca74b072377794b543d0d01099963e68bfddd29f9d37780cbdad27b66156716c (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-2-activate, CEPH_REF=squid, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, 
OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.build-date=20260223, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-10T13:28:06.984 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 13:28:06 vm00 podman[111062]: 2026-03-10 13:28:06.684442763 +0000 UTC m=+0.009351887 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:28:06.984 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 13:28:06 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-2-activate[111074]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T13:28:06.984 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 13:28:06 vm00 bash[111062]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T13:28:06.984 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 13:28:06 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-2-activate[111074]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T13:28:06.984 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 13:28:06 vm00 bash[111062]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T13:28:07.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:06 vm00 ceph-mon[96293]: pgmap v46: 161 pgs: 161 active+clean; 457 KiB data, 144 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:28:07.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:06 vm00 ceph-mon[96293]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-10T13:28:07.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:06 vm00 ceph-mon[96293]: osdmap e105: 8 total, 7 up, 8 in 2026-03-10T13:28:07.252 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:28:06 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:28:06.993Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:28:07.252 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:28:07 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:28:07.001Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:28:07.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:06 vm00 ceph-mon[94470]: pgmap v46: 161 pgs: 161 active+clean; 457 KiB data, 144 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:28:07.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:06 vm00 ceph-mon[94470]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-10T13:28:07.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:06 vm00 ceph-mon[94470]: osdmap e105: 8 total, 7 up, 8 in 2026-03-10T13:28:07.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 
13:28:06 vm08 ceph-mon[82639]: pgmap v46: 161 pgs: 161 active+clean; 457 KiB data, 144 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:28:07.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:06 vm08 ceph-mon[82639]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-10T13:28:07.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:06 vm08 ceph-mon[82639]: osdmap e105: 8 total, 7 up, 8 in 2026-03-10T13:28:07.634 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 13:28:07 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-2-activate[111074]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-10T13:28:07.634 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 13:28:07 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-2-activate[111074]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T13:28:07.634 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 13:28:07 vm00 bash[111062]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-10T13:28:07.634 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 13:28:07 vm00 bash[111062]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T13:28:07.634 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 13:28:07 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-2-activate[111074]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T13:28:07.634 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 13:28:07 vm00 bash[111062]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T13:28:07.634 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 13:28:07 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-2-activate[111074]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-2 2026-03-10T13:28:07.634 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 13:28:07 vm00 bash[111062]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-2 2026-03-10T13:28:07.634 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 13:28:07 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-2-activate[111074]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-078f7eb3-fd76-41dc-ac9a-ecff5adae971/osd-block-f9f7ad09-367f-410b-9921-f31c456c313d --path /var/lib/ceph/osd/ceph-2 --no-mon-config 2026-03-10T13:28:07.634 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 13:28:07 vm00 bash[111062]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-078f7eb3-fd76-41dc-ac9a-ecff5adae971/osd-block-f9f7ad09-367f-410b-9921-f31c456c313d --path /var/lib/ceph/osd/ceph-2 --no-mon-config 2026-03-10T13:28:07.634 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 13:28:07 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-2-activate[111074]: Running command: /usr/bin/ln -snf /dev/ceph-078f7eb3-fd76-41dc-ac9a-ecff5adae971/osd-block-f9f7ad09-367f-410b-9921-f31c456c313d /var/lib/ceph/osd/ceph-2/block 2026-03-10T13:28:08.000 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:07 vm00 ceph-mon[94470]: osdmap e106: 8 total, 7 up, 8 in 2026-03-10T13:28:08.000 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:07 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:08.000 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:07 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:08.000 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:07 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:08.000 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 13:28:07 vm00 
bash[111062]: Running command: /usr/bin/ln -snf /dev/ceph-078f7eb3-fd76-41dc-ac9a-ecff5adae971/osd-block-f9f7ad09-367f-410b-9921-f31c456c313d /var/lib/ceph/osd/ceph-2/block 2026-03-10T13:28:08.000 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 13:28:07 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-2-activate[111074]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-2/block 2026-03-10T13:28:08.000 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 13:28:07 vm00 bash[111062]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-2/block 2026-03-10T13:28:08.000 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 13:28:07 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-2-activate[111074]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-2 2026-03-10T13:28:08.000 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 13:28:07 vm00 bash[111062]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-2 2026-03-10T13:28:08.000 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 13:28:07 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-2-activate[111074]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-2 2026-03-10T13:28:08.000 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 13:28:07 vm00 bash[111062]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-2 2026-03-10T13:28:08.000 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 13:28:07 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-2-activate[111074]: --> ceph-volume lvm activate successful for osd ID: 2 2026-03-10T13:28:08.000 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 13:28:07 vm00 bash[111062]: --> ceph-volume lvm activate successful for osd ID: 2 2026-03-10T13:28:08.000 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 13:28:07 vm00 conmon[111074]: conmon ca74b072377794b543d0 : Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-ca74b072377794b543d0d01099963e68bfddd29f9d37780cbdad27b66156716c.scope/container/memory.events 2026-03-10T13:28:08.000 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 13:28:07 vm00 podman[111062]: 2026-03-10 13:28:07.663247599 +0000 UTC m=+0.988156723 container died ca74b072377794b543d0d01099963e68bfddd29f9d37780cbdad27b66156716c (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-2-activate, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_REF=squid, io.buildah.version=1.41.3, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-10T13:28:08.000 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 13:28:07 vm00 podman[111062]: 2026-03-10 13:28:07.683047564 +0000 UTC m=+1.007956677 container remove ca74b072377794b543d0d01099963e68bfddd29f9d37780cbdad27b66156716c (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-2-activate, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, 
org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.schema-version=1.0, CEPH_REF=squid, OSD_FLAVOR=default, io.buildah.version=1.41.3, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.build-date=20260223) 2026-03-10T13:28:08.000 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 13:28:07 vm00 podman[111307]: 2026-03-10 13:28:07.776677417 +0000 UTC m=+0.016683883 container create 706171e0f5c28f2673efdb1742ac342e5eab5b04565f0a11fffe853d8ab54e70 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, ceph=True) 2026-03-10T13:28:08.000 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 13:28:07 vm00 podman[111307]: 2026-03-10 13:28:07.822805959 +0000 UTC m=+0.062812425 container init 706171e0f5c28f2673efdb1742ac342e5eab5b04565f0a11fffe853d8ab54e70 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.build-date=20260223, CEPH_REF=squid, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/) 2026-03-10T13:28:08.000 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 13:28:07 vm00 podman[111307]: 2026-03-10 13:28:07.827015269 +0000 UTC m=+0.067021735 container start 706171e0f5c28f2673efdb1742ac342e5eab5b04565f0a11fffe853d8ab54e70 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, io.buildah.version=1.41.3) 2026-03-10T13:28:08.000 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 13:28:07 vm00 bash[111307]: 
706171e0f5c28f2673efdb1742ac342e5eab5b04565f0a11fffe853d8ab54e70 2026-03-10T13:28:08.000 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 13:28:07 vm00 podman[111307]: 2026-03-10 13:28:07.770434791 +0000 UTC m=+0.010441267 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:28:08.000 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 13:28:07 vm00 systemd[1]: Started Ceph osd.2 for 98a3dada-1c81-11f1-89c9-d57c120f78d5. 2026-03-10T13:28:08.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:08 vm00 ceph-mon[96293]: osdmap e106: 8 total, 7 up, 8 in 2026-03-10T13:28:08.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:08 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:08.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:08 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:08.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:08 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:08.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:07 vm08 ceph-mon[82639]: osdmap e106: 8 total, 7 up, 8 in 2026-03-10T13:28:08.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:07 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:08.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:07 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:08.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:07 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:08.977 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 13:28:08 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-2[111318]: 2026-03-10T13:28:08.901+0000 7fd7f259b740 -1 Falling back to public interface 2026-03-10T13:28:09.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:09 vm00 ceph-mon[94470]: pgmap v49: 161 pgs: 13 stale+active+clean, 148 active+clean; 457 KiB data, 144 MiB used, 160 GiB / 160 GiB avail; 639 B/s rd, 0 op/s 2026-03-10T13:28:09.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:09 vm00 ceph-mon[96293]: pgmap v49: 161 pgs: 13 stale+active+clean, 148 active+clean; 457 KiB data, 144 MiB used, 160 GiB / 160 GiB avail; 639 B/s rd, 0 op/s 2026-03-10T13:28:09.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:09 vm08 ceph-mon[82639]: pgmap v49: 161 pgs: 13 stale+active+clean, 148 active+clean; 457 KiB data, 144 MiB used, 160 GiB / 160 GiB avail; 639 B/s rd, 0 op/s 2026-03-10T13:28:09.766 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 13:28:09 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-2[111318]: 2026-03-10T13:28:09.506+0000 7fd7f259b740 -1 osd.2 0 read_superblock omap replica is missing. 
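
At this point the redeployed osd.2 is booting on the new image (the raw activation attempt fails with "did not find any matching OSD to activate", after which `ceph-volume lvm activate` succeeds), but the cluster is briefly unhealthy: OSD_DOWN has already been raised above, 13 PGs go stale, and a PG_DEGRADED warning follows just below before the daemon rejoins and recovery clears it. A staggered upgrade that restarts daemons one at a time generally waits for these transient checks to clear before moving on; a minimal sketch of such a wait loop, assuming a reachable `ceph` CLI (the check names, timeout and poll interval are illustrative choices, not values taken from this job):

#!/usr/bin/env python3
# Sketch: block until the transient health checks seen above (OSD_DOWN,
# PG_DEGRADED, ...) have cleared before restarting the next daemon.
# Assumes the `ceph` CLI and an admin keyring; timeout/interval are arbitrary.
import json
import subprocess
import time

TRANSIENT_CHECKS = {"OSD_DOWN", "PG_DEGRADED", "PG_AVAILABILITY"}

def active_checks() -> set:
    """Return the names of the health checks currently raised."""
    out = subprocess.run(
        ["ceph", "status", "--format", "json"],
        capture_output=True, text=True, check=True,
    ).stdout
    return set(json.loads(out).get("health", {}).get("checks", {}))

def wait_until_clean(timeout: float = 600.0, interval: float = 10.0) -> None:
    deadline = time.time() + timeout
    while time.time() < deadline:
        pending = active_checks() & TRANSIENT_CHECKS
        if not pending:
            return
        print(f"waiting for health checks to clear: {sorted(pending)}")
        time.sleep(interval)
    raise TimeoutError("cluster did not return to a clean state in time")

if __name__ == "__main__":
    wait_until_clean()

Polling `ceph status --format json` and inspecting the `health.checks` keys keeps the loop independent of the human-readable health summary, whose wording differs between releases.
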
2026-03-10T13:28:09.766 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 13:28:09 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-2[111318]: 2026-03-10T13:28:09.531+0000 7fd7f259b740 -1 osd.2 104 log_to_monitors true 2026-03-10T13:28:10.019 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:10 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:10.019 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:10 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:10.019 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:10 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:10.019 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:10 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:10.019 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:10 vm00 ceph-mon[94470]: from='osd.2 [v2:192.168.123.100:6818/4134418130,v1:192.168.123.100:6819/4134418130]' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch 2026-03-10T13:28:10.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:10 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:10.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:10 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:10.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:10 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:10.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:10 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:10.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:10 vm08 ceph-mon[82639]: from='osd.2 [v2:192.168.123.100:6818/4134418130,v1:192.168.123.100:6819/4134418130]' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch 2026-03-10T13:28:10.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:10 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:10.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:10 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:10.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:10 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:10.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:10 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:10.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:10 vm00 ceph-mon[96293]: from='osd.2 [v2:192.168.123.100:6818/4134418130,v1:192.168.123.100:6819/4134418130]' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch 2026-03-10T13:28:11.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[94470]: pgmap v50: 161 pgs: 29 active+undersized, 2 stale+active+clean, 11 active+undersized+degraded, 119 active+clean; 457 KiB data, 145 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s; 36/627 objects degraded (5.742%) 2026-03-10T13:28:11.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[94470]: Health check failed: Degraded data redundancy: 36/627 objects degraded (5.742%), 11 pgs degraded (PG_DEGRADED) 2026-03-10T13:28:11.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:11.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:11.253 
INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:28:11.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:28:11.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:11.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:28:11.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:28:11.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:28:11.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:11.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:28:11.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:28:11.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:28:11.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:28:11.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:28:11.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:28:11.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]: dispatch 2026-03-10T13:28:11.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]: dispatch 2026-03-10T13:28:11.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]': finished 2026-03-10T13:28:11.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:28:11.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[94470]: 
from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:28:11.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon"}]': finished 2026-03-10T13:28:11.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]: dispatch 2026-03-10T13:28:11.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]: dispatch 2026-03-10T13:28:11.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]': finished 2026-03-10T13:28:11.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd"}]: dispatch 2026-03-10T13:28:11.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd"}]: dispatch 2026-03-10T13:28:11.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mds"}]: dispatch 2026-03-10T13:28:11.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mds"}]: dispatch 2026-03-10T13:28:11.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]: dispatch 2026-03-10T13:28:11.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]: dispatch 2026-03-10T13:28:11.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]: dispatch 2026-03-10T13:28:11.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]: dispatch 2026-03-10T13:28:11.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:28:11.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:28:11.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config 
rm", "name": "container_image", "who": "client.ceph-exporter"}]: dispatch 2026-03-10T13:28:11.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]: dispatch 2026-03-10T13:28:11.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.iscsi"}]: dispatch 2026-03-10T13:28:11.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.iscsi"}]: dispatch 2026-03-10T13:28:11.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]: dispatch 2026-03-10T13:28:11.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]: dispatch 2026-03-10T13:28:11.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]: dispatch 2026-03-10T13:28:11.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]: dispatch 2026-03-10T13:28:11.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]': finished 2026-03-10T13:28:11.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:28:11.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:28:11.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:28:11.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:28:11.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:28:11.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:28:11.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", 
"who": "mon"}]: dispatch 2026-03-10T13:28:11.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:28:11.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:28:11.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:28:11.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:28:11.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:28:11.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]: dispatch 2026-03-10T13:28:11.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]: dispatch 2026-03-10T13:28:11.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]': finished 2026-03-10T13:28:11.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:28:11.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:28:11.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:11.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[94470]: from='osd.2 [v2:192.168.123.100:6818/4134418130,v1:192.168.123.100:6819/4134418130]' entity='osd.2' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]': finished 2026-03-10T13:28:11.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[94470]: osdmap e107: 8 total, 7 up, 8 in 2026-03-10T13:28:11.254 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[94470]: from='osd.2 [v2:192.168.123.100:6818/4134418130,v1:192.168.123.100:6819/4134418130]' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch 2026-03-10T13:28:11.255 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:28:11.255 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth 
get", "entity": "client.admin"}]: dispatch 2026-03-10T13:28:11.255 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:11.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[96293]: pgmap v50: 161 pgs: 29 active+undersized, 2 stale+active+clean, 11 active+undersized+degraded, 119 active+clean; 457 KiB data, 145 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s; 36/627 objects degraded (5.742%) 2026-03-10T13:28:11.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[96293]: Health check failed: Degraded data redundancy: 36/627 objects degraded (5.742%), 11 pgs degraded (PG_DEGRADED) 2026-03-10T13:28:11.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:11.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:11.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:28:11.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:28:11.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:11.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:28:11.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:28:11.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:28:11.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:11.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:28:11.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:28:11.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:28:11.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:28:11.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:28:11.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:28:11.255 
INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]: dispatch 2026-03-10T13:28:11.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]: dispatch 2026-03-10T13:28:11.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]': finished 2026-03-10T13:28:11.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:28:11.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:28:11.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon"}]': finished 2026-03-10T13:28:11.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]: dispatch 2026-03-10T13:28:11.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]: dispatch 2026-03-10T13:28:11.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]': finished 2026-03-10T13:28:11.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd"}]: dispatch 2026-03-10T13:28:11.255 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd"}]: dispatch 2026-03-10T13:28:11.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mds"}]: dispatch 2026-03-10T13:28:11.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mds"}]: dispatch 2026-03-10T13:28:11.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]: dispatch 2026-03-10T13:28:11.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]: dispatch 2026-03-10T13:28:11.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[96293]: from='mgr.44106 
192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]: dispatch 2026-03-10T13:28:11.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]: dispatch 2026-03-10T13:28:11.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:28:11.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:28:11.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]: dispatch 2026-03-10T13:28:11.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]: dispatch 2026-03-10T13:28:11.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.iscsi"}]: dispatch 2026-03-10T13:28:11.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.iscsi"}]: dispatch 2026-03-10T13:28:11.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]: dispatch 2026-03-10T13:28:11.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]: dispatch 2026-03-10T13:28:11.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]: dispatch 2026-03-10T13:28:11.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]: dispatch 2026-03-10T13:28:11.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]': finished 2026-03-10T13:28:11.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:28:11.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:28:11.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[96293]: from='mgr.44106 
192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:28:11.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:28:11.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:28:11.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:28:11.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:28:11.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:28:11.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:28:11.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:28:11.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:28:11.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:28:11.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]: dispatch 2026-03-10T13:28:11.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]: dispatch 2026-03-10T13:28:11.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]': finished 2026-03-10T13:28:11.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:28:11.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:28:11.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:11.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[96293]: from='osd.2 
[v2:192.168.123.100:6818/4134418130,v1:192.168.123.100:6819/4134418130]' entity='osd.2' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]': finished 2026-03-10T13:28:11.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[96293]: osdmap e107: 8 total, 7 up, 8 in 2026-03-10T13:28:11.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[96293]: from='osd.2 [v2:192.168.123.100:6818/4134418130,v1:192.168.123.100:6819/4134418130]' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch 2026-03-10T13:28:11.256 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:28:11.257 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:28:11.257 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:11 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:11.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:11 vm08 ceph-mon[82639]: pgmap v50: 161 pgs: 29 active+undersized, 2 stale+active+clean, 11 active+undersized+degraded, 119 active+clean; 457 KiB data, 145 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s; 36/627 objects degraded (5.742%) 2026-03-10T13:28:11.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:11 vm08 ceph-mon[82639]: Health check failed: Degraded data redundancy: 36/627 objects degraded (5.742%), 11 pgs degraded (PG_DEGRADED) 2026-03-10T13:28:11.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:11 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:11.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:11 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:11.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:11 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:28:11.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:11 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:28:11.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:11 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:11.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:11 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:28:11.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:11 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:28:11.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:11 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:28:11.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:11 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:11.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:11 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: 
dispatch 2026-03-10T13:28:11.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:11 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:28:11.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:11 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:28:11.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:11 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:28:11.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:11 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:28:11.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:11 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:28:11.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:11 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]: dispatch 2026-03-10T13:28:11.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:11 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]: dispatch 2026-03-10T13:28:11.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:11 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]': finished 2026-03-10T13:28:11.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:11 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:28:11.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:11 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:28:11.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:11 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon"}]': finished 2026-03-10T13:28:11.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:11 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]: dispatch 2026-03-10T13:28:11.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:11 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]: dispatch 2026-03-10T13:28:11.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:11 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]': finished 2026-03-10T13:28:11.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:11 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd"}]: dispatch 2026-03-10T13:28:11.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:11 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd"}]: dispatch 2026-03-10T13:28:11.521 
INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:11 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mds"}]: dispatch 2026-03-10T13:28:11.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:11 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mds"}]: dispatch 2026-03-10T13:28:11.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:11 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]: dispatch 2026-03-10T13:28:11.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:11 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]: dispatch 2026-03-10T13:28:11.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:11 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]: dispatch 2026-03-10T13:28:11.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:11 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]: dispatch 2026-03-10T13:28:11.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:11 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:28:11.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:11 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:28:11.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:11 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]: dispatch 2026-03-10T13:28:11.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:11 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]: dispatch 2026-03-10T13:28:11.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:11 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.iscsi"}]: dispatch 2026-03-10T13:28:11.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:11 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.iscsi"}]: dispatch 2026-03-10T13:28:11.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:11 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]: dispatch 2026-03-10T13:28:11.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:11 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]: dispatch 2026-03-10T13:28:11.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:11 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]: dispatch 
2026-03-10T13:28:11.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:11 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]: dispatch 2026-03-10T13:28:11.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:11 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]': finished 2026-03-10T13:28:11.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:11 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:28:11.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:11 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:28:11.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:11 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:28:11.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:11 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:28:11.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:11 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:28:11.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:11 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:28:11.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:11 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:28:11.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:11 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:28:11.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:11 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:28:11.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:11 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:28:11.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:11 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:28:11.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:11 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:28:11.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:11 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]: dispatch 2026-03-10T13:28:11.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:11 vm08 ceph-mon[82639]: 
from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]: dispatch 2026-03-10T13:28:11.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:11 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]': finished 2026-03-10T13:28:11.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:11 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:28:11.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:11 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:28:11.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:11 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:11.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:11 vm08 ceph-mon[82639]: from='osd.2 [v2:192.168.123.100:6818/4134418130,v1:192.168.123.100:6819/4134418130]' entity='osd.2' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]': finished 2026-03-10T13:28:11.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:11 vm08 ceph-mon[82639]: osdmap e107: 8 total, 7 up, 8 in 2026-03-10T13:28:11.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:11 vm08 ceph-mon[82639]: from='osd.2 [v2:192.168.123.100:6818/4134418130,v1:192.168.123.100:6819/4134418130]' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch 2026-03-10T13:28:11.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:11 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:28:11.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:11 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:28:11.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:11 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:11.752 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 13:28:11 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-2[111318]: 2026-03-10T13:28:11.469+0000 7fd7e9b45640 -1 osd.2 104 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-10T13:28:12.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:12 vm00 ceph-mon[94470]: Upgrade: Setting container_image for all nvmeof 2026-03-10T13:28:12.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:12 vm00 ceph-mon[94470]: Upgrade: Finalizing container_image settings 2026-03-10T13:28:12.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:12 vm00 ceph-mon[94470]: Upgrade: Complete! 
2026-03-10T13:28:12.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:12 vm00 ceph-mon[94470]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:28:12.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:12 vm00 ceph-mon[96293]: Upgrade: Setting container_image for all nvmeof 2026-03-10T13:28:12.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:12 vm00 ceph-mon[96293]: Upgrade: Finalizing container_image settings 2026-03-10T13:28:12.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:12 vm00 ceph-mon[96293]: Upgrade: Complete! 2026-03-10T13:28:12.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:12 vm00 ceph-mon[96293]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:28:12.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:12 vm08 ceph-mon[82639]: Upgrade: Setting container_image for all nvmeof 2026-03-10T13:28:12.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:12 vm08 ceph-mon[82639]: Upgrade: Finalizing container_image settings 2026-03-10T13:28:12.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:12 vm08 ceph-mon[82639]: Upgrade: Complete! 2026-03-10T13:28:12.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:12 vm08 ceph-mon[82639]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:28:13.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:13 vm00 ceph-mon[94470]: pgmap v52: 161 pgs: 33 active+undersized, 14 active+undersized+degraded, 114 active+clean; 457 KiB data, 163 MiB used, 160 GiB / 160 GiB avail; 56/627 objects degraded (8.931%) 2026-03-10T13:28:13.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:13 vm00 ceph-mon[94470]: OSD bench result of 30654.591714 IOPS is not within the threshold limit range of 50.000000 IOPS and 500.000000 IOPS for osd.2. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd]. 
2026-03-10T13:28:13.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:13 vm00 ceph-mon[94470]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-10T13:28:13.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:13 vm00 ceph-mon[94470]: osd.2 [v2:192.168.123.100:6818/4134418130,v1:192.168.123.100:6819/4134418130] boot 2026-03-10T13:28:13.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:13 vm00 ceph-mon[94470]: osdmap e108: 8 total, 8 up, 8 in 2026-03-10T13:28:13.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:13 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T13:28:13.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:13 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:13.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:13 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:28:13.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:13 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:13.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:13 vm00 ceph-mon[96293]: pgmap v52: 161 pgs: 33 active+undersized, 14 active+undersized+degraded, 114 active+clean; 457 KiB data, 163 MiB used, 160 GiB / 160 GiB avail; 56/627 objects degraded (8.931%) 2026-03-10T13:28:13.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:13 vm00 ceph-mon[96293]: OSD bench result of 30654.591714 IOPS is not within the threshold limit range of 50.000000 IOPS and 500.000000 IOPS for osd.2. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd]. 
2026-03-10T13:28:13.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:13 vm00 ceph-mon[96293]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-10T13:28:13.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:13 vm00 ceph-mon[96293]: osd.2 [v2:192.168.123.100:6818/4134418130,v1:192.168.123.100:6819/4134418130] boot 2026-03-10T13:28:13.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:13 vm00 ceph-mon[96293]: osdmap e108: 8 total, 8 up, 8 in 2026-03-10T13:28:13.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:13 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T13:28:13.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:13 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:13.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:13 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:28:13.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:13 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:13.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:13 vm08 ceph-mon[82639]: pgmap v52: 161 pgs: 33 active+undersized, 14 active+undersized+degraded, 114 active+clean; 457 KiB data, 163 MiB used, 160 GiB / 160 GiB avail; 56/627 objects degraded (8.931%) 2026-03-10T13:28:13.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:13 vm08 ceph-mon[82639]: OSD bench result of 30654.591714 IOPS is not within the threshold limit range of 50.000000 IOPS and 500.000000 IOPS for osd.2. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd]. 
2026-03-10T13:28:13.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:13 vm08 ceph-mon[82639]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-10T13:28:13.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:13 vm08 ceph-mon[82639]: osd.2 [v2:192.168.123.100:6818/4134418130,v1:192.168.123.100:6819/4134418130] boot 2026-03-10T13:28:13.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:13 vm08 ceph-mon[82639]: osdmap e108: 8 total, 8 up, 8 in 2026-03-10T13:28:13.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:13 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T13:28:13.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:13 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:13.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:13 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:28:13.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:13 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:14.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:14 vm00 ceph-mon[94470]: osdmap e109: 8 total, 8 up, 8 in 2026-03-10T13:28:14.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:14 vm00 ceph-mon[96293]: osdmap e109: 8 total, 8 up, 8 in 2026-03-10T13:28:14.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:14 vm08 ceph-mon[82639]: osdmap e109: 8 total, 8 up, 8 in 2026-03-10T13:28:15.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:15 vm00 ceph-mon[94470]: pgmap v55: 161 pgs: 33 active+undersized, 14 active+undersized+degraded, 114 active+clean; 457 KiB data, 163 MiB used, 160 GiB / 160 GiB avail; 56/627 objects degraded (8.931%) 2026-03-10T13:28:15.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:15 vm00 ceph-mon[96293]: pgmap v55: 161 pgs: 33 active+undersized, 14 active+undersized+degraded, 114 active+clean; 457 KiB data, 163 MiB used, 160 GiB / 160 GiB avail; 56/627 objects degraded (8.931%) 2026-03-10T13:28:15.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:15 vm08 ceph-mon[82639]: pgmap v55: 161 pgs: 33 active+undersized, 14 active+undersized+degraded, 114 active+clean; 457 KiB data, 163 MiB used, 160 GiB / 160 GiB avail; 56/627 objects degraded (8.931%) 2026-03-10T13:28:16.002 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:28:15 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:28:15] "GET /metrics HTTP/1.1" 200 37543 "" "Prometheus/2.51.0" 2026-03-10T13:28:16.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:16 vm00 ceph-mon[94470]: pgmap v56: 161 pgs: 12 active+undersized, 1 active+undersized+degraded, 148 active+clean; 457 KiB data, 164 MiB used, 160 GiB / 160 GiB avail; 1/627 objects degraded (0.159%) 2026-03-10T13:28:16.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:16 vm00 ceph-mon[94470]: Health check update: Degraded data redundancy: 1/627 objects degraded (0.159%), 1 pg degraded (PG_DEGRADED) 2026-03-10T13:28:16.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:16 vm00 ceph-mon[96293]: pgmap v56: 161 pgs: 12 active+undersized, 1 active+undersized+degraded, 148 active+clean; 457 KiB data, 164 MiB used, 160 GiB / 160 GiB avail; 1/627 objects degraded (0.159%) 2026-03-10T13:28:16.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:16 vm00 ceph-mon[96293]: Health check update: Degraded 
data redundancy: 1/627 objects degraded (0.159%), 1 pg degraded (PG_DEGRADED) 2026-03-10T13:28:16.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:16 vm08 ceph-mon[82639]: pgmap v56: 161 pgs: 12 active+undersized, 1 active+undersized+degraded, 148 active+clean; 457 KiB data, 164 MiB used, 160 GiB / 160 GiB avail; 1/627 objects degraded (0.159%) 2026-03-10T13:28:16.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:16 vm08 ceph-mon[82639]: Health check update: Degraded data redundancy: 1/627 objects degraded (0.159%), 1 pg degraded (PG_DEGRADED) 2026-03-10T13:28:17.252 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:28:16 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:28:16.995Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:28:17.252 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:28:16 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:28:16.995Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:28:17.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:17 vm00 ceph-mon[94470]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 1/627 objects degraded (0.159%), 1 pg degraded) 2026-03-10T13:28:17.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:17 vm00 ceph-mon[94470]: Cluster is now healthy 2026-03-10T13:28:17.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:17 vm00 ceph-mon[96293]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 1/627 objects degraded (0.159%), 1 pg degraded) 2026-03-10T13:28:17.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:17 vm00 ceph-mon[96293]: Cluster is now healthy 2026-03-10T13:28:17.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:17 vm08 ceph-mon[82639]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 1/627 objects degraded (0.159%), 1 pg degraded) 2026-03-10T13:28:17.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:17 vm08 ceph-mon[82639]: Cluster is now healthy 2026-03-10T13:28:18.671 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:18 vm08 ceph-mon[82639]: pgmap v57: 161 pgs: 161 active+clean; 457 KiB data, 564 MiB used, 159 GiB / 160 GiB avail; 748 B/s rd, 0 op/s 2026-03-10T13:28:18.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:18 vm00 ceph-mon[94470]: pgmap v57: 161 pgs: 161 active+clean; 457 KiB data, 564 MiB used, 159 GiB / 160 GiB avail; 748 B/s rd, 0 op/s 2026-03-10T13:28:18.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:18 vm00 ceph-mon[96293]: pgmap v57: 161 pgs: 161 active+clean; 457 KiB data, 564 MiB used, 159 GiB / 160 GiB avail; 748 B/s rd, 0 op/s 2026-03-10T13:28:20.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:20 vm00 ceph-mon[94470]: pgmap v58: 161 pgs: 161 active+clean; 457 KiB data, 564 MiB used, 159 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:28:20.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:20 vm00 ceph-mon[96293]: pgmap v58: 161 pgs: 161 
active+clean; 457 KiB data, 564 MiB used, 159 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:28:20.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:20 vm08 ceph-mon[82639]: pgmap v58: 161 pgs: 161 active+clean; 457 KiB data, 564 MiB used, 159 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:28:21.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:21 vm00 ceph-mon[94470]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:28:21.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:21 vm00 ceph-mon[96293]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:28:21.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:21 vm08 ceph-mon[82639]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:28:22.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:22 vm00 ceph-mon[94470]: pgmap v59: 161 pgs: 161 active+clean; 457 KiB data, 164 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-10T13:28:22.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:22 vm00 ceph-mon[96293]: pgmap v59: 161 pgs: 161 active+clean; 457 KiB data, 164 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-10T13:28:22.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:22 vm08 ceph-mon[82639]: pgmap v59: 161 pgs: 161 active+clean; 457 KiB data, 164 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-10T13:28:24.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:24 vm00 ceph-mon[94470]: pgmap v60: 161 pgs: 161 active+clean; 457 KiB data, 164 MiB used, 160 GiB / 160 GiB avail; 997 B/s rd, 0 op/s 2026-03-10T13:28:24.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:24 vm00 ceph-mon[96293]: pgmap v60: 161 pgs: 161 active+clean; 457 KiB data, 164 MiB used, 160 GiB / 160 GiB avail; 997 B/s rd, 0 op/s 2026-03-10T13:28:24.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:24 vm08 ceph-mon[82639]: pgmap v60: 161 pgs: 161 active+clean; 457 KiB data, 164 MiB used, 160 GiB / 160 GiB avail; 997 B/s rd, 0 op/s 2026-03-10T13:28:26.002 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:28:25 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:28:25] "GET /metrics HTTP/1.1" 200 37543 "" "Prometheus/2.51.0" 2026-03-10T13:28:26.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:26 vm00 ceph-mon[94470]: pgmap v61: 161 pgs: 161 active+clean; 457 KiB data, 164 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:28:26.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:26 vm00 ceph-mon[96293]: pgmap v61: 161 pgs: 161 active+clean; 457 KiB data, 164 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:28:26.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:26 vm08 ceph-mon[82639]: pgmap v61: 161 pgs: 161 active+clean; 457 KiB data, 164 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:28:27.252 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:28:26 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:28:26.996Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post 
\"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:28:27.252 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:28:26 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:28:26.996Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:28:28.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:28 vm00 ceph-mon[94470]: pgmap v62: 161 pgs: 161 active+clean; 457 KiB data, 164 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:28:28.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:28 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:28.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:28 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:28:28.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:28 vm00 ceph-mon[96293]: pgmap v62: 161 pgs: 161 active+clean; 457 KiB data, 164 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:28:28.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:28 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:28.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:28 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:28:28.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:28 vm08 ceph-mon[82639]: pgmap v62: 161 pgs: 161 active+clean; 457 KiB data, 164 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:28:28.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:28 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:28.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:28 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:28:30.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:30 vm00 ceph-mon[94470]: pgmap v63: 161 pgs: 161 active+clean; 457 KiB data, 164 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:28:30.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:30 vm00 ceph-mon[96293]: pgmap v63: 161 pgs: 161 active+clean; 457 KiB data, 164 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:28:30.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:30 vm08 ceph-mon[82639]: pgmap v63: 161 pgs: 161 active+clean; 457 KiB data, 164 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:28:31.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:31 vm00 ceph-mon[94470]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:28:31.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:31 vm00 ceph-mon[96293]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:28:31.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:31 vm08 ceph-mon[82639]: 
from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:28:32.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:32 vm00 ceph-mon[94470]: pgmap v64: 161 pgs: 161 active+clean; 457 KiB data, 164 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:28:32.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:32 vm00 ceph-mon[96293]: pgmap v64: 161 pgs: 161 active+clean; 457 KiB data, 164 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:28:32.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:32 vm08 ceph-mon[82639]: pgmap v64: 161 pgs: 161 active+clean; 457 KiB data, 164 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:28:34.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:34 vm00 ceph-mon[94470]: pgmap v65: 161 pgs: 161 active+clean; 457 KiB data, 164 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:28:34.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:34 vm00 ceph-mon[96293]: pgmap v65: 161 pgs: 161 active+clean; 457 KiB data, 164 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:28:34.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:34 vm08 ceph-mon[82639]: pgmap v65: 161 pgs: 161 active+clean; 457 KiB data, 164 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:28:35.092 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch ps' 2026-03-10T13:28:35.592 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:35 vm00 ceph-mon[94470]: from='client.54190 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:28:35.592 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:35 vm00 ceph-mon[96293]: from='client.54190 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:28:35.593 INFO:teuthology.orchestra.run.vm00.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T13:28:35.593 INFO:teuthology.orchestra.run.vm00.stdout:alertmanager.a vm00 *:9093,9094 running (15m) 26s ago 21m 26.4M - 0.25.0 c8568f914cd2 12fde3cf83cb 2026-03-10T13:28:35.593 INFO:teuthology.orchestra.run.vm00.stdout:grafana.a vm08 *:3000 running (2m) 96s ago 21m 74.2M - 10.4.0 c8b91775d855 960e32589e98 2026-03-10T13:28:35.593 INFO:teuthology.orchestra.run.vm00.stdout:iscsi.foo.vm00.dezodo vm00 running (2m) 26s ago 21m 49.3M - 3.5 e1d6a67b021e 630bf6d4e7f3 2026-03-10T13:28:35.593 INFO:teuthology.orchestra.run.vm00.stdout:mgr.x vm08 *:8443,9283,8765 running (2m) 96s ago 23m 486M - 19.2.3-678-ge911bdeb 654f31e6858e 31b91eebc856 2026-03-10T13:28:35.593 INFO:teuthology.orchestra.run.vm00.stdout:mgr.y vm00 *:8443,9283,8765 running (9m) 26s ago 24m 554M - 19.2.3-678-ge911bdeb 654f31e6858e 5bc576d4d32b 2026-03-10T13:28:35.593 INFO:teuthology.orchestra.run.vm00.stdout:mon.a vm00 running (102s) 26s ago 24m 48.5M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 981df6371890 2026-03-10T13:28:35.593 INFO:teuthology.orchestra.run.vm00.stdout:mon.b vm08 running (2m) 96s ago 23m 37.6M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 8cceb678a9ee 2026-03-10T13:28:35.593 INFO:teuthology.orchestra.run.vm00.stdout:mon.c vm00 running (93s) 26s ago 23m 39.3M 
2048M 19.2.3-678-ge911bdeb 654f31e6858e 43deda66dee3 2026-03-10T13:28:35.593 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.a vm00 *:9100 running (15m) 26s ago 21m 10.8M - 1.7.0 72c9c2088986 bcf883401619 2026-03-10T13:28:35.593 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.b vm08 *:9100 running (14m) 96s ago 21m 10.0M - 1.7.0 72c9c2088986 4ac83f03f818 2026-03-10T13:28:35.593 INFO:teuthology.orchestra.run.vm00.stdout:osd.0 vm00 running (64s) 26s ago 23m 45.3M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 5fc74f4d2179 2026-03-10T13:28:35.593 INFO:teuthology.orchestra.run.vm00.stdout:osd.1 vm00 running (57s) 26s ago 22m 67.1M 4096M 19.2.3-678-ge911bdeb 654f31e6858e dc65e199e9eb 2026-03-10T13:28:35.593 INFO:teuthology.orchestra.run.vm00.stdout:osd.2 vm00 running (27s) 26s ago 22m 13.2M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 706171e0f5c2 2026-03-10T13:28:35.593 INFO:teuthology.orchestra.run.vm00.stdout:osd.3 vm00 running (22m) 26s ago 22m 55.3M 4096M 17.2.0 e1d6a67b021e 98bb6d7ea69f 2026-03-10T13:28:35.593 INFO:teuthology.orchestra.run.vm00.stdout:osd.4 vm08 running (22m) 96s ago 22m 54.7M 4096M 17.2.0 e1d6a67b021e e349440ca776 2026-03-10T13:28:35.593 INFO:teuthology.orchestra.run.vm00.stdout:osd.5 vm08 running (22m) 96s ago 22m 56.5M 4096M 17.2.0 e1d6a67b021e 9a6a2a67cc99 2026-03-10T13:28:35.593 INFO:teuthology.orchestra.run.vm00.stdout:osd.6 vm08 running (22m) 96s ago 22m 52.2M 4096M 17.2.0 e1d6a67b021e c27676916d52 2026-03-10T13:28:35.593 INFO:teuthology.orchestra.run.vm00.stdout:osd.7 vm08 running (22m) 96s ago 22m 54.0M 4096M 17.2.0 e1d6a67b021e bf67951990a5 2026-03-10T13:28:35.593 INFO:teuthology.orchestra.run.vm00.stdout:prometheus.a vm08 *:9095 running (2m) 96s ago 21m 48.2M - 2.51.0 1d3b7f56885b 3f9b2d0821c9 2026-03-10T13:28:35.593 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm00.tvlvzo vm00 *:8000 running (21m) 26s ago 21m 97.9M - 17.2.0 e1d6a67b021e be24eac16807 2026-03-10T13:28:35.593 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm08.ljayps vm08 *:8000 running (21m) 96s ago 21m 94.3M - 17.2.0 e1d6a67b021e 67b525427823 2026-03-10T13:28:35.656 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph versions | jq -e '"'"'.osd | length == 2'"'"'' 2026-03-10T13:28:35.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:35 vm08 ceph-mon[82639]: from='client.54190 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:28:35.845 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:28:35 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:28:35] "GET /metrics HTTP/1.1" 200 37680 "" "Prometheus/2.51.0" 2026-03-10T13:28:36.170 INFO:teuthology.orchestra.run.vm00.stdout:true 2026-03-10T13:28:36.220 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch upgrade check quay.ceph.io/ceph-ci/ceph:$sha1 | jq -e '"'"'.up_to_date | length == 8'"'"'' 2026-03-10T13:28:36.424 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:36 vm00 ceph-mon[94470]: pgmap v66: 161 pgs: 161 active+clean; 457 KiB data, 164 MiB used, 
160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:28:36.424 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:36 vm00 ceph-mon[94470]: from='client.54196 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:28:36.424 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:36 vm00 ceph-mon[94470]: from='client.? 192.168.123.100:0/1270785215' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:28:36.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:36 vm00 ceph-mon[96293]: pgmap v66: 161 pgs: 161 active+clean; 457 KiB data, 164 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:28:36.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:36 vm00 ceph-mon[96293]: from='client.54196 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:28:36.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:36 vm00 ceph-mon[96293]: from='client.? 192.168.123.100:0/1270785215' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:28:36.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:36 vm08 ceph-mon[82639]: pgmap v66: 161 pgs: 161 active+clean; 457 KiB data, 164 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:28:36.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:36 vm08 ceph-mon[82639]: from='client.54196 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:28:36.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:36 vm08 ceph-mon[82639]: from='client.? 192.168.123.100:0/1270785215' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:28:36.969 INFO:teuthology.orchestra.run.vm00.stdout:true 2026-03-10T13:28:37.028 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch upgrade status' 2026-03-10T13:28:37.179 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:28:36 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:28:36.996Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:28:37.179 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:28:37 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:28:37.000Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:28:37.580 INFO:teuthology.orchestra.run.vm00.stdout:{ 2026-03-10T13:28:37.580 INFO:teuthology.orchestra.run.vm00.stdout: "target_image": null, 2026-03-10T13:28:37.580 INFO:teuthology.orchestra.run.vm00.stdout: "in_progress": false, 2026-03-10T13:28:37.580 INFO:teuthology.orchestra.run.vm00.stdout: "which": "", 2026-03-10T13:28:37.580 INFO:teuthology.orchestra.run.vm00.stdout: "services_complete": [], 
2026-03-10T13:28:37.580 INFO:teuthology.orchestra.run.vm00.stdout: "progress": null, 2026-03-10T13:28:37.580 INFO:teuthology.orchestra.run.vm00.stdout: "message": "", 2026-03-10T13:28:37.580 INFO:teuthology.orchestra.run.vm00.stdout: "is_paused": false 2026-03-10T13:28:37.580 INFO:teuthology.orchestra.run.vm00.stdout:} 2026-03-10T13:28:37.650 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph health detail' 2026-03-10T13:28:37.746 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:37 vm00 ceph-mon[94470]: from='client.44277 -' entity='client.admin' cmd=[{"prefix": "orch upgrade check", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:28:37.747 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:37 vm00 ceph-mon[96293]: from='client.44277 -' entity='client.admin' cmd=[{"prefix": "orch upgrade check", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:28:37.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:37 vm08 ceph-mon[82639]: from='client.44277 -' entity='client.admin' cmd=[{"prefix": "orch upgrade check", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:28:38.154 INFO:teuthology.orchestra.run.vm00.stdout:HEALTH_OK 2026-03-10T13:28:38.211 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types crash,osd' 2026-03-10T13:28:38.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:38 vm00 ceph-mon[94470]: pgmap v67: 161 pgs: 161 active+clean; 457 KiB data, 164 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:28:38.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:38 vm00 ceph-mon[94470]: from='client.44283 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:28:38.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:38 vm00 ceph-mon[94470]: from='client.? 192.168.123.100:0/2155075474' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T13:28:38.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:38 vm00 ceph-mon[96293]: pgmap v67: 161 pgs: 161 active+clean; 457 KiB data, 164 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:28:38.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:38 vm00 ceph-mon[96293]: from='client.44283 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:28:38.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:38 vm00 ceph-mon[96293]: from='client.? 
192.168.123.100:0/2155075474' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T13:28:38.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:38 vm08 ceph-mon[82639]: pgmap v67: 161 pgs: 161 active+clean; 457 KiB data, 164 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:28:38.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:38 vm08 ceph-mon[82639]: from='client.44283 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:28:38.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:38 vm08 ceph-mon[82639]: from='client.? 192.168.123.100:0/2155075474' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T13:28:38.962 INFO:teuthology.orchestra.run.vm00.stdout:Initiating upgrade to quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:28:39.014 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'while ceph orch upgrade status | jq '"'"'.in_progress'"'"' | grep true && ! ceph orch upgrade status | jq '"'"'.message'"'"' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done' 2026-03-10T13:28:39.559 INFO:teuthology.orchestra.run.vm00.stdout:true 2026-03-10T13:28:39.964 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:39 vm00 ceph-mon[94470]: from='client.34301 -' entity='client.admin' cmd=[{"prefix": "orch upgrade start", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "daemon_types": "crash,osd", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:28:39.964 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:39 vm00 ceph-mon[94470]: Upgrade: Started with target quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:28:39.965 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:39 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:39.965 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:39 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:28:39.965 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:39 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:28:39.965 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:39 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:39.965 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:39 vm00 ceph-mon[94470]: Upgrade: First pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:28:39.965 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:39 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:39.965 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:39 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:28:39.965 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:39 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:28:39.965 
INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:39 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:39.965 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:39 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:28:39.965 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:39 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:39.965 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:39 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:28:39.965 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:39 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:39.965 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:39 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["3"], "max": 16}]: dispatch 2026-03-10T13:28:39.965 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:39 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:39.965 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:39 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.3"}]: dispatch 2026-03-10T13:28:39.965 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:39 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:28:39.965 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:39 vm00 ceph-mon[96293]: from='client.34301 -' entity='client.admin' cmd=[{"prefix": "orch upgrade start", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "daemon_types": "crash,osd", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:28:39.965 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:39 vm00 ceph-mon[96293]: Upgrade: Started with target quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:28:39.965 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:39 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:39.965 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:39 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:28:39.965 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:39 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:28:39.965 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:39 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:39.965 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:39 vm00 ceph-mon[96293]: Upgrade: First pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:28:39.965 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:39 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:39.965 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:39 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:28:39.965 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:39 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": 
"versions"}]: dispatch 2026-03-10T13:28:39.965 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:39 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:39.965 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:39 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:28:39.965 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:39 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:39.965 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:39 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:28:39.965 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:39 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:39.965 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:39 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["3"], "max": 16}]: dispatch 2026-03-10T13:28:39.965 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:39 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:39.965 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:39 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.3"}]: dispatch 2026-03-10T13:28:39.965 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:39 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:28:40.043 INFO:teuthology.orchestra.run.vm00.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T13:28:40.043 INFO:teuthology.orchestra.run.vm00.stdout:alertmanager.a vm00 *:9093,9094 running (15m) 31s ago 21m 26.4M - 0.25.0 c8568f914cd2 12fde3cf83cb 2026-03-10T13:28:40.043 INFO:teuthology.orchestra.run.vm00.stdout:grafana.a vm08 *:3000 running (2m) 101s ago 21m 74.2M - 10.4.0 c8b91775d855 960e32589e98 2026-03-10T13:28:40.043 INFO:teuthology.orchestra.run.vm00.stdout:iscsi.foo.vm00.dezodo vm00 running (2m) 31s ago 21m 49.3M - 3.5 e1d6a67b021e 630bf6d4e7f3 2026-03-10T13:28:40.043 INFO:teuthology.orchestra.run.vm00.stdout:mgr.x vm08 *:8443,9283,8765 running (2m) 101s ago 23m 486M - 19.2.3-678-ge911bdeb 654f31e6858e 31b91eebc856 2026-03-10T13:28:40.043 INFO:teuthology.orchestra.run.vm00.stdout:mgr.y vm00 *:8443,9283,8765 running (9m) 31s ago 24m 554M - 19.2.3-678-ge911bdeb 654f31e6858e 5bc576d4d32b 2026-03-10T13:28:40.043 INFO:teuthology.orchestra.run.vm00.stdout:mon.a vm00 running (106s) 31s ago 24m 48.5M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 981df6371890 2026-03-10T13:28:40.043 INFO:teuthology.orchestra.run.vm00.stdout:mon.b vm08 running (2m) 101s ago 23m 37.6M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 8cceb678a9ee 2026-03-10T13:28:40.043 INFO:teuthology.orchestra.run.vm00.stdout:mon.c vm00 running (98s) 31s ago 23m 39.3M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 43deda66dee3 2026-03-10T13:28:40.043 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.a vm00 *:9100 running (15m) 31s ago 21m 10.8M - 1.7.0 72c9c2088986 bcf883401619 2026-03-10T13:28:40.043 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.b vm08 *:9100 running (15m) 101s ago 21m 10.0M - 1.7.0 72c9c2088986 4ac83f03f818 2026-03-10T13:28:40.043 INFO:teuthology.orchestra.run.vm00.stdout:osd.0 vm00 running (68s) 31s ago 23m 45.3M 4096M 19.2.3-678-ge911bdeb 
654f31e6858e 5fc74f4d2179 2026-03-10T13:28:40.043 INFO:teuthology.orchestra.run.vm00.stdout:osd.1 vm00 running (61s) 31s ago 23m 67.1M 4096M 19.2.3-678-ge911bdeb 654f31e6858e dc65e199e9eb 2026-03-10T13:28:40.043 INFO:teuthology.orchestra.run.vm00.stdout:osd.2 vm00 running (32s) 31s ago 22m 13.2M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 706171e0f5c2 2026-03-10T13:28:40.044 INFO:teuthology.orchestra.run.vm00.stdout:osd.3 vm00 running (22m) 31s ago 22m 55.3M 4096M 17.2.0 e1d6a67b021e 98bb6d7ea69f 2026-03-10T13:28:40.044 INFO:teuthology.orchestra.run.vm00.stdout:osd.4 vm08 running (22m) 101s ago 22m 54.7M 4096M 17.2.0 e1d6a67b021e e349440ca776 2026-03-10T13:28:40.044 INFO:teuthology.orchestra.run.vm00.stdout:osd.5 vm08 running (22m) 101s ago 22m 56.5M 4096M 17.2.0 e1d6a67b021e 9a6a2a67cc99 2026-03-10T13:28:40.044 INFO:teuthology.orchestra.run.vm00.stdout:osd.6 vm08 running (22m) 101s ago 22m 52.2M 4096M 17.2.0 e1d6a67b021e c27676916d52 2026-03-10T13:28:40.044 INFO:teuthology.orchestra.run.vm00.stdout:osd.7 vm08 running (22m) 101s ago 22m 54.0M 4096M 17.2.0 e1d6a67b021e bf67951990a5 2026-03-10T13:28:40.044 INFO:teuthology.orchestra.run.vm00.stdout:prometheus.a vm08 *:9095 running (2m) 101s ago 21m 48.2M - 2.51.0 1d3b7f56885b 3f9b2d0821c9 2026-03-10T13:28:40.044 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm00.tvlvzo vm00 *:8000 running (21m) 31s ago 21m 97.9M - 17.2.0 e1d6a67b021e be24eac16807 2026-03-10T13:28:40.044 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm08.ljayps vm08 *:8000 running (21m) 101s ago 21m 94.3M - 17.2.0 e1d6a67b021e 67b525427823 2026-03-10T13:28:40.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:39 vm08 ceph-mon[82639]: from='client.34301 -' entity='client.admin' cmd=[{"prefix": "orch upgrade start", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "daemon_types": "crash,osd", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:28:40.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:39 vm08 ceph-mon[82639]: Upgrade: Started with target quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:28:40.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:39 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:40.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:39 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:28:40.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:39 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:28:40.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:39 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:40.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:39 vm08 ceph-mon[82639]: Upgrade: First pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:28:40.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:39 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:40.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:39 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:28:40.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:39 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": 
"versions"}]: dispatch 2026-03-10T13:28:40.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:39 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:40.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:39 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:28:40.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:39 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:40.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:39 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:28:40.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:39 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:40.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:39 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["3"], "max": 16}]: dispatch 2026-03-10T13:28:40.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:39 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:40.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:39 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.3"}]: dispatch 2026-03-10T13:28:40.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:39 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:28:40.322 INFO:teuthology.orchestra.run.vm00.stdout:{ 2026-03-10T13:28:40.322 INFO:teuthology.orchestra.run.vm00.stdout: "mon": { 2026-03-10T13:28:40.322 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 3 2026-03-10T13:28:40.322 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T13:28:40.322 INFO:teuthology.orchestra.run.vm00.stdout: "mgr": { 2026-03-10T13:28:40.322 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-10T13:28:40.322 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T13:28:40.322 INFO:teuthology.orchestra.run.vm00.stdout: "osd": { 2026-03-10T13:28:40.322 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 5, 2026-03-10T13:28:40.322 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 3 2026-03-10T13:28:40.322 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T13:28:40.322 INFO:teuthology.orchestra.run.vm00.stdout: "rgw": { 2026-03-10T13:28:40.322 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 2 2026-03-10T13:28:40.322 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T13:28:40.322 INFO:teuthology.orchestra.run.vm00.stdout: "overall": { 2026-03-10T13:28:40.322 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 7, 2026-03-10T13:28:40.322 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 8 2026-03-10T13:28:40.322 INFO:teuthology.orchestra.run.vm00.stdout: } 
2026-03-10T13:28:40.322 INFO:teuthology.orchestra.run.vm00.stdout:} 2026-03-10T13:28:40.556 INFO:teuthology.orchestra.run.vm00.stdout:{ 2026-03-10T13:28:40.556 INFO:teuthology.orchestra.run.vm00.stdout: "target_image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", 2026-03-10T13:28:40.556 INFO:teuthology.orchestra.run.vm00.stdout: "in_progress": true, 2026-03-10T13:28:40.556 INFO:teuthology.orchestra.run.vm00.stdout: "which": "Upgrading daemons of type(s) crash,osd", 2026-03-10T13:28:40.556 INFO:teuthology.orchestra.run.vm00.stdout: "services_complete": [], 2026-03-10T13:28:40.556 INFO:teuthology.orchestra.run.vm00.stdout: "progress": "3/8 daemons upgraded", 2026-03-10T13:28:40.556 INFO:teuthology.orchestra.run.vm00.stdout: "message": "Currently upgrading osd daemons", 2026-03-10T13:28:40.556 INFO:teuthology.orchestra.run.vm00.stdout: "is_paused": false 2026-03-10T13:28:40.556 INFO:teuthology.orchestra.run.vm00.stdout:} 2026-03-10T13:28:40.998 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 13:28:40 vm00 systemd[1]: Stopping Ceph osd.3 for 98a3dada-1c81-11f1-89c9-d57c120f78d5... 2026-03-10T13:28:40.998 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 13:28:40 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-3[62974]: 2026-03-10T13:28:40.652+0000 7f4d6ad3c700 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.3 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-10T13:28:40.999 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 13:28:40 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-3[62974]: 2026-03-10T13:28:40.652+0000 7f4d6ad3c700 -1 osd.3 109 *** Got signal Terminated *** 2026-03-10T13:28:40.999 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 13:28:40 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-3[62974]: 2026-03-10T13:28:40.652+0000 7f4d6ad3c700 -1 osd.3 109 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-10T13:28:41.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:41 vm00 ceph-mon[94470]: pgmap v68: 161 pgs: 161 active+clean; 457 KiB data, 164 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:28:41.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:41 vm00 ceph-mon[94470]: Upgrade: Target is version 19.2.3-678-ge911bdeb (squid) 2026-03-10T13:28:41.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:41 vm00 ceph-mon[94470]: Upgrade: Target container is quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, digests ['quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc'] 2026-03-10T13:28:41.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:41 vm00 ceph-mon[94470]: Upgrade: Setting container_image for all mgr 2026-03-10T13:28:41.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:41 vm00 ceph-mon[94470]: Upgrade: Setting container_image for all mon 2026-03-10T13:28:41.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:41 vm00 ceph-mon[94470]: Upgrade: Setting container_image for all crash 2026-03-10T13:28:41.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:41 vm00 ceph-mon[94470]: from='mon.2 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["3"], "max": 16}]: dispatch 2026-03-10T13:28:41.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:41 vm00 ceph-mon[94470]: Upgrade: osd.3 is safe to restart 2026-03-10T13:28:41.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:41 vm00 ceph-mon[94470]: from='client.54220 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:28:41.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:41 vm00 ceph-mon[94470]: Upgrade: Updating osd.3 2026-03-10T13:28:41.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:41 vm00 ceph-mon[94470]: Deploying daemon osd.3 on vm00 2026-03-10T13:28:41.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:41 vm00 ceph-mon[94470]: from='client.54226 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:28:41.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:41 vm00 ceph-mon[94470]: from='client.44298 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:28:41.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:41 vm00 ceph-mon[94470]: from='client.? 192.168.123.100:0/2977683646' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:28:41.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:41 vm00 ceph-mon[94470]: osd.3 marked itself down and dead 2026-03-10T13:28:41.253 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 13:28:41 vm00 podman[116036]: 2026-03-10 13:28:41.033389447 +0000 UTC m=+0.391669781 container died 98bb6d7ea69f5f7058b27bc7bb8d35955f202a2d150e5d1cd69c8e9827a56313 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-3, version=8, ceph=True, distribution-scope=public, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, io.openshift.tags=base centos centos-stream, maintainer=Guillaume Abrioux , com.redhat.license_terms=https://centos.org/legal/licensing-policy/, release=754, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, RELEASE=HEAD, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, vcs-type=git, GIT_BRANCH=HEAD, GIT_REPO=https://github.com/ceph/ceph-container.git, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, io.k8s.display-name=CentOS Stream 8, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, io.openshift.expose-services=, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, vendor=Red Hat, Inc., GIT_CLEAN=True, architecture=x86_64, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., com.redhat.component=centos-stream-container, io.buildah.version=1.19.8, name=centos-stream, CEPH_POINT_RELEASE=-17.2.0, build-date=2022-05-03T08:36:31.336870) 2026-03-10T13:28:41.253 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 13:28:41 vm00 podman[116036]: 2026-03-10 13:28:41.053512257 +0000 UTC m=+0.411792591 container remove 98bb6d7ea69f5f7058b27bc7bb8d35955f202a2d150e5d1cd69c8e9827a56313 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-3, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, vcs-type=git, vendor=Red Hat, Inc., io.buildah.version=1.19.8, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., GIT_BRANCH=HEAD, RELEASE=HEAD, io.openshift.expose-services=, ceph=True, GIT_CLEAN=True, io.k8s.display-name=CentOS Stream 8, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, architecture=x86_64, distribution-scope=public, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, io.openshift.tags=base centos centos-stream, CEPH_POINT_RELEASE=-17.2.0, maintainer=Guillaume Abrioux , com.redhat.component=centos-stream-container, release=754, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, version=8, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, name=centos-stream, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, GIT_REPO=https://github.com/ceph/ceph-container.git, build-date=2022-05-03T08:36:31.336870) 2026-03-10T13:28:41.253 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 13:28:41 vm00 bash[116036]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-3 2026-03-10T13:28:41.253 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 13:28:41 vm00 podman[116104]: 2026-03-10 13:28:41.179783766 +0000 UTC m=+0.015167986 container create 7cb4fc4ab62b1a3f554cd62e494f695ed887223e9759033edf9c362a809e58bd (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-3-deactivate, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, OSD_FLAVOR=default, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team ) 2026-03-10T13:28:41.253 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 13:28:41 vm00 podman[116104]: 2026-03-10 13:28:41.218354018 +0000 UTC m=+0.053738238 container init 7cb4fc4ab62b1a3f554cd62e494f695ed887223e9759033edf9c362a809e58bd 
(image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-3-deactivate, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, ceph=True) 2026-03-10T13:28:41.253 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 13:28:41 vm00 podman[116104]: 2026-03-10 13:28:41.221062128 +0000 UTC m=+0.056446348 container start 7cb4fc4ab62b1a3f554cd62e494f695ed887223e9759033edf9c362a809e58bd (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-3-deactivate, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid) 2026-03-10T13:28:41.253 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 13:28:41 vm00 podman[116104]: 2026-03-10 13:28:41.224094044 +0000 UTC m=+0.059478264 container attach 7cb4fc4ab62b1a3f554cd62e494f695ed887223e9759033edf9c362a809e58bd (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-3-deactivate, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, io.buildah.version=1.41.3, CEPH_REF=squid, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/) 2026-03-10T13:28:41.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:41 vm00 ceph-mon[96293]: pgmap v68: 161 pgs: 161 active+clean; 457 KiB data, 164 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:28:41.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:41 vm00 ceph-mon[96293]: Upgrade: Target is version 19.2.3-678-ge911bdeb (squid) 2026-03-10T13:28:41.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:41 vm00 ceph-mon[96293]: Upgrade: Target container is quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, digests ['quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc'] 2026-03-10T13:28:41.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:41 vm00 
ceph-mon[96293]: Upgrade: Setting container_image for all mgr 2026-03-10T13:28:41.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:41 vm00 ceph-mon[96293]: Upgrade: Setting container_image for all mon 2026-03-10T13:28:41.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:41 vm00 ceph-mon[96293]: Upgrade: Setting container_image for all crash 2026-03-10T13:28:41.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:41 vm00 ceph-mon[96293]: from='mon.2 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["3"], "max": 16}]: dispatch 2026-03-10T13:28:41.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:41 vm00 ceph-mon[96293]: Upgrade: osd.3 is safe to restart 2026-03-10T13:28:41.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:41 vm00 ceph-mon[96293]: from='client.54220 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:28:41.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:41 vm00 ceph-mon[96293]: Upgrade: Updating osd.3 2026-03-10T13:28:41.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:41 vm00 ceph-mon[96293]: Deploying daemon osd.3 on vm00 2026-03-10T13:28:41.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:41 vm00 ceph-mon[96293]: from='client.54226 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:28:41.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:41 vm00 ceph-mon[96293]: from='client.44298 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:28:41.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:41 vm00 ceph-mon[96293]: from='client.? 192.168.123.100:0/2977683646' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:28:41.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:41 vm00 ceph-mon[96293]: osd.3 marked itself down and dead 2026-03-10T13:28:41.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:41 vm08 ceph-mon[82639]: pgmap v68: 161 pgs: 161 active+clean; 457 KiB data, 164 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:28:41.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:41 vm08 ceph-mon[82639]: Upgrade: Target is version 19.2.3-678-ge911bdeb (squid) 2026-03-10T13:28:41.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:41 vm08 ceph-mon[82639]: Upgrade: Target container is quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, digests ['quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc'] 2026-03-10T13:28:41.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:41 vm08 ceph-mon[82639]: Upgrade: Setting container_image for all mgr 2026-03-10T13:28:41.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:41 vm08 ceph-mon[82639]: Upgrade: Setting container_image for all mon 2026-03-10T13:28:41.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:41 vm08 ceph-mon[82639]: Upgrade: Setting container_image for all crash 2026-03-10T13:28:41.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:41 vm08 ceph-mon[82639]: from='mon.2 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["3"], "max": 16}]: dispatch 2026-03-10T13:28:41.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:41 vm08 ceph-mon[82639]: Upgrade: osd.3 is safe to restart 2026-03-10T13:28:41.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:41 vm08 ceph-mon[82639]: from='client.54220 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:28:41.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:41 vm08 ceph-mon[82639]: Upgrade: Updating osd.3 2026-03-10T13:28:41.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:41 vm08 ceph-mon[82639]: Deploying daemon osd.3 on vm00 2026-03-10T13:28:41.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:41 vm08 ceph-mon[82639]: from='client.54226 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:28:41.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:41 vm08 ceph-mon[82639]: from='client.44298 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:28:41.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:41 vm08 ceph-mon[82639]: from='client.? 192.168.123.100:0/2977683646' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:28:41.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:41 vm08 ceph-mon[82639]: osd.3 marked itself down and dead 2026-03-10T13:28:41.530 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 13:28:41 vm00 podman[116104]: 2026-03-10 13:28:41.173633201 +0000 UTC m=+0.009017421 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:28:41.531 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 13:28:41 vm00 podman[116104]: 2026-03-10 13:28:41.341713355 +0000 UTC m=+0.177097575 container died 7cb4fc4ab62b1a3f554cd62e494f695ed887223e9759033edf9c362a809e58bd (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-3-deactivate, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, org.label-schema.build-date=20260223, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2) 2026-03-10T13:28:41.531 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 13:28:41 vm00 podman[116104]: 2026-03-10 13:28:41.35647588 +0000 UTC m=+0.191860100 container remove 7cb4fc4ab62b1a3f554cd62e494f695ed887223e9759033edf9c362a809e58bd (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-3-deactivate, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, 
io.buildah.version=1.41.3, org.label-schema.build-date=20260223, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-10T13:28:41.531 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 13:28:41 vm00 systemd[1]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@osd.3.service: Deactivated successfully. 2026-03-10T13:28:41.531 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 13:28:41 vm00 systemd[1]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@osd.3.service: Unit process 116115 (conmon) remains running after unit stopped. 2026-03-10T13:28:41.531 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 13:28:41 vm00 systemd[1]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@osd.3.service: Unit process 116124 (podman) remains running after unit stopped. 2026-03-10T13:28:41.531 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 13:28:41 vm00 systemd[1]: Stopped Ceph osd.3 for 98a3dada-1c81-11f1-89c9-d57c120f78d5. 2026-03-10T13:28:41.531 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 13:28:41 vm00 systemd[1]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@osd.3.service: Consumed 31.504s CPU time, 214.5M memory peak. 2026-03-10T13:28:41.531 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 13:28:41 vm00 systemd[1]: Starting Ceph osd.3 for 98a3dada-1c81-11f1-89c9-d57c120f78d5... 2026-03-10T13:28:42.003 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 13:28:41 vm00 podman[116207]: 2026-03-10 13:28:41.614699991 +0000 UTC m=+0.015743763 container create 512359c86c6ed68bed57ca02d48370c64396a9c1e85d0b9adf514c007c0ffd61 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-3-activate, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, ceph=True, OSD_FLAVOR=default, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df) 2026-03-10T13:28:42.003 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 13:28:41 vm00 podman[116207]: 2026-03-10 13:28:41.644458979 +0000 UTC m=+0.045502742 container init 512359c86c6ed68bed57ca02d48370c64396a9c1e85d0b9adf514c007c0ffd61 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-3-activate, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, ceph=True, io.buildah.version=1.41.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/) 2026-03-10T13:28:42.003 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 13:28:41 vm00 podman[116207]: 2026-03-10 13:28:41.647411386 +0000 UTC m=+0.048455158 container start 
512359c86c6ed68bed57ca02d48370c64396a9c1e85d0b9adf514c007c0ffd61 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-3-activate, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, ceph=True, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team ) 2026-03-10T13:28:42.003 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 13:28:41 vm00 podman[116207]: 2026-03-10 13:28:41.648367567 +0000 UTC m=+0.049411339 container attach 512359c86c6ed68bed57ca02d48370c64396a9c1e85d0b9adf514c007c0ffd61 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-3-activate, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, CEPH_REF=squid, org.label-schema.build-date=20260223, io.buildah.version=1.41.3, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/) 2026-03-10T13:28:42.003 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 13:28:41 vm00 podman[116207]: 2026-03-10 13:28:41.608303437 +0000 UTC m=+0.009347209 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:28:42.003 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 13:28:41 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-3-activate[116217]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T13:28:42.003 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 13:28:41 vm00 bash[116207]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T13:28:42.003 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 13:28:41 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-3-activate[116217]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T13:28:42.003 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 13:28:41 vm00 bash[116207]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T13:28:42.487 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:42 vm00 ceph-mon[94470]: from='client.54235 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:28:42.487 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:42 vm00 ceph-mon[94470]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-10T13:28:42.487 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:42 vm00 ceph-mon[94470]: osdmap e110: 8 total, 7 up, 8 in 2026-03-10T13:28:42.487 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:42 vm00 ceph-mon[94470]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service 
status", "format": "json"}]: dispatch 2026-03-10T13:28:42.488 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 13:28:42 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-3-activate[116217]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-10T13:28:42.488 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 13:28:42 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-3-activate[116217]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T13:28:42.488 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 13:28:42 vm00 bash[116207]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-10T13:28:42.488 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 13:28:42 vm00 bash[116207]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T13:28:42.488 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 13:28:42 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-3-activate[116217]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T13:28:42.488 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 13:28:42 vm00 bash[116207]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T13:28:42.488 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 13:28:42 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-3-activate[116217]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-3 2026-03-10T13:28:42.488 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 13:28:42 vm00 bash[116207]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-3 2026-03-10T13:28:42.488 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 13:28:42 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-3-activate[116217]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-bcac9be6-a1f4-408b-8a45-1c86ac7630e8/osd-block-36dd1fdb-2d5f-4be6-b549-9bcc7e503439 --path /var/lib/ceph/osd/ceph-3 --no-mon-config 2026-03-10T13:28:42.488 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 13:28:42 vm00 bash[116207]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-bcac9be6-a1f4-408b-8a45-1c86ac7630e8/osd-block-36dd1fdb-2d5f-4be6-b549-9bcc7e503439 --path /var/lib/ceph/osd/ceph-3 --no-mon-config 2026-03-10T13:28:42.488 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:42 vm00 ceph-mon[96293]: from='client.54235 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:28:42.488 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:42 vm00 ceph-mon[96293]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-10T13:28:42.488 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:42 vm00 ceph-mon[96293]: osdmap e110: 8 total, 7 up, 8 in 2026-03-10T13:28:42.488 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:42 vm00 ceph-mon[96293]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:28:42.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:42 vm08 ceph-mon[82639]: from='client.54235 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:28:42.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:42 vm08 ceph-mon[82639]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-10T13:28:42.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:42 vm08 ceph-mon[82639]: osdmap e110: 8 total, 7 up, 8 in 2026-03-10T13:28:42.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:42 vm08 
ceph-mon[82639]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:28:42.753 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 13:28:42 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-3-activate[116217]: Running command: /usr/bin/ln -snf /dev/ceph-bcac9be6-a1f4-408b-8a45-1c86ac7630e8/osd-block-36dd1fdb-2d5f-4be6-b549-9bcc7e503439 /var/lib/ceph/osd/ceph-3/block 2026-03-10T13:28:42.753 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 13:28:42 vm00 bash[116207]: Running command: /usr/bin/ln -snf /dev/ceph-bcac9be6-a1f4-408b-8a45-1c86ac7630e8/osd-block-36dd1fdb-2d5f-4be6-b549-9bcc7e503439 /var/lib/ceph/osd/ceph-3/block 2026-03-10T13:28:42.753 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 13:28:42 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-3-activate[116217]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-3/block 2026-03-10T13:28:42.753 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 13:28:42 vm00 bash[116207]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-3/block 2026-03-10T13:28:42.753 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 13:28:42 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-3-activate[116217]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-3 2026-03-10T13:28:42.753 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 13:28:42 vm00 bash[116207]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-3 2026-03-10T13:28:42.753 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 13:28:42 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-3-activate[116217]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-3 2026-03-10T13:28:42.753 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 13:28:42 vm00 bash[116207]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-3 2026-03-10T13:28:42.753 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 13:28:42 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-3-activate[116217]: --> ceph-volume lvm activate successful for osd ID: 3 2026-03-10T13:28:42.753 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 13:28:42 vm00 bash[116207]: --> ceph-volume lvm activate successful for osd ID: 3 2026-03-10T13:28:42.753 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 13:28:42 vm00 conmon[116217]: conmon 512359c86c6ed68bed57 : Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-512359c86c6ed68bed57ca02d48370c64396a9c1e85d0b9adf514c007c0ffd61.scope/container/memory.events 2026-03-10T13:28:42.753 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 13:28:42 vm00 podman[116207]: 2026-03-10 13:28:42.510733369 +0000 UTC m=+0.911777132 container died 512359c86c6ed68bed57ca02d48370c64396a9c1e85d0b9adf514c007c0ffd61 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-3-activate, org.label-schema.schema-version=1.0, ceph=True, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, OSD_FLAVOR=default) 2026-03-10T13:28:42.753 
INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 13:28:42 vm00 podman[116207]: 2026-03-10 13:28:42.526045233 +0000 UTC m=+0.927089005 container remove 512359c86c6ed68bed57ca02d48370c64396a9c1e85d0b9adf514c007c0ffd61 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-3-activate, CEPH_REF=squid, io.buildah.version=1.41.3, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-10T13:28:42.753 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 13:28:42 vm00 podman[116457]: 2026-03-10 13:28:42.611814792 +0000 UTC m=+0.015538769 container create 8739c77cf14d740cb93d4969a99c77f8d2219be203a0dcd995a3eb9d9c66fb34 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-3, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20260223, io.buildah.version=1.41.3, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9) 2026-03-10T13:28:42.753 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 13:28:42 vm00 podman[116457]: 2026-03-10 13:28:42.64111967 +0000 UTC m=+0.044843647 container init 8739c77cf14d740cb93d4969a99c77f8d2219be203a0dcd995a3eb9d9c66fb34 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-3, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20260223, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df) 2026-03-10T13:28:42.753 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 13:28:42 vm00 podman[116457]: 2026-03-10 13:28:42.64420676 +0000 UTC m=+0.047930726 container start 8739c77cf14d740cb93d4969a99c77f8d2219be203a0dcd995a3eb9d9c66fb34 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-3, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base 
Image, io.buildah.version=1.41.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team ) 2026-03-10T13:28:42.753 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 13:28:42 vm00 bash[116457]: 8739c77cf14d740cb93d4969a99c77f8d2219be203a0dcd995a3eb9d9c66fb34 2026-03-10T13:28:42.753 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 13:28:42 vm00 podman[116457]: 2026-03-10 13:28:42.605614877 +0000 UTC m=+0.009338863 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:28:42.753 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 13:28:42 vm00 systemd[1]: Started Ceph osd.3 for 98a3dada-1c81-11f1-89c9-d57c120f78d5. 2026-03-10T13:28:43.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:43 vm00 ceph-mon[94470]: pgmap v70: 161 pgs: 27 stale+active+clean, 134 active+clean; 457 KiB data, 164 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-10T13:28:43.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:43 vm00 ceph-mon[94470]: osdmap e111: 8 total, 7 up, 8 in 2026-03-10T13:28:43.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:43 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:43.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:43 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:28:43.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:43 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:43.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:43 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:43.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:43 vm00 ceph-mon[96293]: pgmap v70: 161 pgs: 27 stale+active+clean, 134 active+clean; 457 KiB data, 164 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-10T13:28:43.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:43 vm00 ceph-mon[96293]: osdmap e111: 8 total, 7 up, 8 in 2026-03-10T13:28:43.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:43 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:43.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:43 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:28:43.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:43 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:43.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:43 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:43.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:43 vm08 ceph-mon[82639]: pgmap v70: 161 pgs: 27 stale+active+clean, 134 active+clean; 457 KiB data, 164 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-10T13:28:43.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:43 vm08 ceph-mon[82639]: osdmap e111: 8 total, 7 up, 8 in 2026-03-10T13:28:43.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:43 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 
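The run above is executing one staggered-upgrade step: "ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types crash,osd" limits the upgrade to those daemon types, and the shell loop dispatched at 13:28:39 keeps polling "ceph orch upgrade status", checking the in_progress and message fields, while cephadm stops, redeploys and reactivates one OSD at a time (osd.3 in the entries above). What follows is a minimal standalone sketch of that polling pattern, assuming only a working ceph CLI and jq on the host; the image tag is the one used in this run, and the final jq read of "ceph versions" is just an illustrative way to inspect the per-daemon version counts shown in the captured output, not part of the teuthology task itself.

#!/usr/bin/env bash
# Sketch only: drive a staggered cephadm upgrade for the crash and osd daemon
# types and poll until it finishes, mirroring the loop recorded in this log.
# Assumes the ceph CLI and jq are available where these commands run.
set -euo pipefail

IMAGE="quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df"

ceph orch upgrade start --image "$IMAGE" --daemon-types crash,osd

# Keep polling while the upgrade is in progress and no error message is set,
# the same checks the logged loop performs with jq '.in_progress' / '.message'.
while true; do
    status="$(ceph orch upgrade status)"
    in_progress="$(echo "$status" | jq -r '.in_progress')"
    message="$(echo "$status" | jq -r '.message')"
    if [ "$in_progress" != "true" ]; then
        break                      # upgrade no longer running
    fi
    if echo "$message" | grep -q Error; then
        echo "upgrade reported an error: $message" >&2
        exit 1
    fi
    ceph orch ps                   # progress snapshot, as in the log
    ceph versions
    sleep 30                       # same interval as the logged loop
done

# "ceph versions" returns a map of daemon type to {version string: count},
# as in the JSON captured above; print the osd breakdown once the loop exits.
ceph versions | jq '.osd'

Polling the status object, rather than watching individual daemons, lets the loop exit as soon as the orchestrator reports the requested daemon types complete, or bail out early if the message field carries an Error, which is exactly the condition the test's while-loop guards against.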
2026-03-10T13:28:43.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:43 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:28:43.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:43 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:43.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:43 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:43.577 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 13:28:43 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-3[116469]: 2026-03-10T13:28:43.460+0000 7f8480690740 -1 Falling back to public interface 2026-03-10T13:28:44.117 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 13:28:44 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-3[116469]: 2026-03-10T13:28:44.073+0000 7f8480690740 -1 osd.3 0 read_superblock omap replica is missing. 2026-03-10T13:28:44.117 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 13:28:44 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-3[116469]: 2026-03-10T13:28:44.113+0000 7f8480690740 -1 osd.3 109 log_to_monitors true 2026-03-10T13:28:45.181 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:44 vm00 ceph-mon[94470]: pgmap v72: 161 pgs: 27 stale+active+clean, 134 active+clean; 457 KiB data, 164 MiB used, 160 GiB / 160 GiB avail; 639 B/s rd, 0 op/s 2026-03-10T13:28:45.181 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:44 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:45.181 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:44 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:45.181 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:44 vm00 ceph-mon[94470]: from='osd.3 [v2:192.168.123.100:6826/2114396828,v1:192.168.123.100:6827/2114396828]' entity='osd.3' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]: dispatch 2026-03-10T13:28:45.181 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:44 vm00 ceph-mon[94470]: from='osd.3 ' entity='osd.3' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]: dispatch 2026-03-10T13:28:45.181 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:44 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:45.181 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:44 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:45.181 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 13:28:45 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-3[116469]: 2026-03-10T13:28:45.089+0000 7f847843b640 -1 osd.3 109 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-10T13:28:45.181 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:44 vm00 ceph-mon[96293]: pgmap v72: 161 pgs: 27 stale+active+clean, 134 active+clean; 457 KiB data, 164 MiB used, 160 GiB / 160 GiB avail; 639 B/s rd, 0 op/s 2026-03-10T13:28:45.181 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:44 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:45.181 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:44 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:45.181 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:44 vm00 ceph-mon[96293]: from='osd.3 [v2:192.168.123.100:6826/2114396828,v1:192.168.123.100:6827/2114396828]' entity='osd.3' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]: dispatch 2026-03-10T13:28:45.181 
INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:44 vm00 ceph-mon[96293]: from='osd.3 ' entity='osd.3' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]: dispatch 2026-03-10T13:28:45.181 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:44 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:45.181 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:44 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:45.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:44 vm08 ceph-mon[82639]: pgmap v72: 161 pgs: 27 stale+active+clean, 134 active+clean; 457 KiB data, 164 MiB used, 160 GiB / 160 GiB avail; 639 B/s rd, 0 op/s 2026-03-10T13:28:45.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:44 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:45.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:44 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:45.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:44 vm08 ceph-mon[82639]: from='osd.3 [v2:192.168.123.100:6826/2114396828,v1:192.168.123.100:6827/2114396828]' entity='osd.3' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]: dispatch 2026-03-10T13:28:45.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:44 vm08 ceph-mon[82639]: from='osd.3 ' entity='osd.3' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]: dispatch 2026-03-10T13:28:45.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:44 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:45.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:44 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:45.502 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:28:45 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:28:45.179+0000 7f1445d54640 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (18 PGs are or would become offline) 2026-03-10T13:28:46.002 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:28:45 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:28:45] "GET /metrics HTTP/1.1" 200 37750 "" "Prometheus/2.51.0" 2026-03-10T13:28:46.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:46 vm00 ceph-mon[94470]: from='osd.3 ' entity='osd.3' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]': finished 2026-03-10T13:28:46.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:46 vm00 ceph-mon[94470]: from='osd.3 [v2:192.168.123.100:6826/2114396828,v1:192.168.123.100:6827/2114396828]' entity='osd.3' cmd=[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch 2026-03-10T13:28:46.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:46 vm00 ceph-mon[94470]: osdmap e112: 8 total, 7 up, 8 in 2026-03-10T13:28:46.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:46 vm00 ceph-mon[94470]: from='osd.3 ' entity='osd.3' cmd=[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch 2026-03-10T13:28:46.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:46 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:46.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:46 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:46.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 
13:28:46 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:28:46.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:46 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:28:46.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:46 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:46.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:46 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:28:46.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:46 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:28:46.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:46 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:28:46.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:46 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:28:46.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:46 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["4"], "max": 16}]: dispatch 2026-03-10T13:28:46.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:46 vm00 ceph-mon[96293]: from='osd.3 ' entity='osd.3' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]': finished 2026-03-10T13:28:46.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:46 vm00 ceph-mon[96293]: from='osd.3 [v2:192.168.123.100:6826/2114396828,v1:192.168.123.100:6827/2114396828]' entity='osd.3' cmd=[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch 2026-03-10T13:28:46.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:46 vm00 ceph-mon[96293]: osdmap e112: 8 total, 7 up, 8 in 2026-03-10T13:28:46.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:46 vm00 ceph-mon[96293]: from='osd.3 ' entity='osd.3' cmd=[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch 2026-03-10T13:28:46.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:46 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:46.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:46 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:46.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:46 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:28:46.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:46 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:28:46.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:46 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:46.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:46 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config dump", "format": 
"json"}]: dispatch 2026-03-10T13:28:46.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:46 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:28:46.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:46 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:28:46.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:46 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:28:46.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:46 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["4"], "max": 16}]: dispatch 2026-03-10T13:28:46.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:46 vm08 ceph-mon[82639]: from='osd.3 ' entity='osd.3' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]': finished 2026-03-10T13:28:46.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:46 vm08 ceph-mon[82639]: from='osd.3 [v2:192.168.123.100:6826/2114396828,v1:192.168.123.100:6827/2114396828]' entity='osd.3' cmd=[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch 2026-03-10T13:28:46.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:46 vm08 ceph-mon[82639]: osdmap e112: 8 total, 7 up, 8 in 2026-03-10T13:28:46.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:46 vm08 ceph-mon[82639]: from='osd.3 ' entity='osd.3' cmd=[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm00", "root=default"]}]: dispatch 2026-03-10T13:28:46.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:46 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:46.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:46 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:46.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:46 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:28:46.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:46 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:28:46.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:46 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:46.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:46 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:28:46.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:46 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:28:46.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:46 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:28:46.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:46 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:28:46.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:46 vm08 ceph-mon[82639]: 
from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["4"], "max": 16}]: dispatch 2026-03-10T13:28:47.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:47 vm00 ceph-mon[94470]: from='mon.2 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["4"], "max": 16}]: dispatch 2026-03-10T13:28:47.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:47 vm00 ceph-mon[94470]: Upgrade: unsafe to stop osd(s) at this time (18 PGs are or would become offline) 2026-03-10T13:28:47.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:47 vm00 ceph-mon[94470]: pgmap v74: 161 pgs: 2 unknown, 36 active+undersized, 23 active+undersized+degraded, 100 active+clean; 457 KiB data, 183 MiB used, 160 GiB / 160 GiB avail; 79/627 objects degraded (12.600%) 2026-03-10T13:28:47.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:47 vm00 ceph-mon[94470]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-10T13:28:47.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:47 vm00 ceph-mon[94470]: Cluster is now healthy 2026-03-10T13:28:47.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:47 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T13:28:47.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:47 vm00 ceph-mon[94470]: osd.3 [v2:192.168.123.100:6826/2114396828,v1:192.168.123.100:6827/2114396828] boot 2026-03-10T13:28:47.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:47 vm00 ceph-mon[94470]: osdmap e113: 8 total, 8 up, 8 in 2026-03-10T13:28:47.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:47 vm00 ceph-mon[94470]: Health check failed: Degraded data redundancy: 79/627 objects degraded (12.600%), 23 pgs degraded (PG_DEGRADED) 2026-03-10T13:28:47.253 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:28:46 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:28:46.998Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:28:47.253 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:28:46 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:28:46.999Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:28:47.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:47 vm00 ceph-mon[96293]: from='mon.2 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["4"], "max": 16}]: dispatch 2026-03-10T13:28:47.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:47 vm00 ceph-mon[96293]: Upgrade: unsafe to stop osd(s) at this time (18 PGs are or would become offline) 2026-03-10T13:28:47.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:47 vm00 ceph-mon[96293]: pgmap v74: 161 pgs: 2 unknown, 36 active+undersized, 23 active+undersized+degraded, 100 active+clean; 457 KiB data, 183 MiB used, 160 GiB / 160 GiB avail; 79/627 objects degraded (12.600%) 2026-03-10T13:28:47.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:47 vm00 ceph-mon[96293]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-10T13:28:47.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:47 vm00 ceph-mon[96293]: Cluster is now healthy 2026-03-10T13:28:47.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:47 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T13:28:47.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:47 vm00 ceph-mon[96293]: osd.3 [v2:192.168.123.100:6826/2114396828,v1:192.168.123.100:6827/2114396828] boot 2026-03-10T13:28:47.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:47 vm00 ceph-mon[96293]: osdmap e113: 8 total, 8 up, 8 in 2026-03-10T13:28:47.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:47 vm00 ceph-mon[96293]: Health check failed: Degraded data redundancy: 79/627 objects degraded (12.600%), 23 pgs degraded (PG_DEGRADED) 2026-03-10T13:28:47.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:47 vm08 ceph-mon[82639]: from='mon.2 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["4"], "max": 16}]: dispatch 2026-03-10T13:28:47.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:47 vm08 ceph-mon[82639]: Upgrade: unsafe to stop osd(s) at this time (18 PGs are or would become offline) 2026-03-10T13:28:47.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:47 vm08 ceph-mon[82639]: pgmap v74: 161 pgs: 2 unknown, 36 active+undersized, 23 active+undersized+degraded, 100 active+clean; 457 KiB data, 183 MiB used, 160 GiB / 160 GiB avail; 79/627 objects degraded (12.600%) 2026-03-10T13:28:47.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:47 vm08 ceph-mon[82639]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-10T13:28:47.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:47 vm08 ceph-mon[82639]: Cluster is now healthy 2026-03-10T13:28:47.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:47 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T13:28:47.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:47 vm08 ceph-mon[82639]: osd.3 [v2:192.168.123.100:6826/2114396828,v1:192.168.123.100:6827/2114396828] boot 2026-03-10T13:28:47.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:47 vm08 ceph-mon[82639]: osdmap e113: 8 total, 8 up, 8 in 2026-03-10T13:28:47.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:47 vm08 ceph-mon[82639]: Health check failed: Degraded data redundancy: 79/627 objects degraded (12.600%), 23 pgs degraded (PG_DEGRADED) 2026-03-10T13:28:48.421 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:48 vm08 ceph-mon[82639]: osdmap e114: 8 total, 8 up, 8 in 2026-03-10T13:28:48.421 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:48 vm08 ceph-mon[82639]: pgmap v77: 161 pgs: 43 active+undersized, 23 
active+undersized+degraded, 95 active+clean; 457 KiB data, 183 MiB used, 160 GiB / 160 GiB avail; 79/627 objects degraded (12.600%) 2026-03-10T13:28:48.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:48 vm00 ceph-mon[94470]: osdmap e114: 8 total, 8 up, 8 in 2026-03-10T13:28:48.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:48 vm00 ceph-mon[94470]: pgmap v77: 161 pgs: 43 active+undersized, 23 active+undersized+degraded, 95 active+clean; 457 KiB data, 183 MiB used, 160 GiB / 160 GiB avail; 79/627 objects degraded (12.600%) 2026-03-10T13:28:48.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:48 vm00 ceph-mon[96293]: osdmap e114: 8 total, 8 up, 8 in 2026-03-10T13:28:48.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:48 vm00 ceph-mon[96293]: pgmap v77: 161 pgs: 43 active+undersized, 23 active+undersized+degraded, 95 active+clean; 457 KiB data, 183 MiB used, 160 GiB / 160 GiB avail; 79/627 objects degraded (12.600%) 2026-03-10T13:28:50.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:50 vm00 ceph-mon[94470]: pgmap v78: 161 pgs: 27 active+undersized, 12 active+undersized+degraded, 122 active+clean; 457 KiB data, 184 MiB used, 160 GiB / 160 GiB avail; 43/627 objects degraded (6.858%) 2026-03-10T13:28:50.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:50 vm00 ceph-mon[96293]: pgmap v78: 161 pgs: 27 active+undersized, 12 active+undersized+degraded, 122 active+clean; 457 KiB data, 184 MiB used, 160 GiB / 160 GiB avail; 43/627 objects degraded (6.858%) 2026-03-10T13:28:50.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:50 vm08 ceph-mon[82639]: pgmap v78: 161 pgs: 27 active+undersized, 12 active+undersized+degraded, 122 active+clean; 457 KiB data, 184 MiB used, 160 GiB / 160 GiB avail; 43/627 objects degraded (6.858%) 2026-03-10T13:28:51.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:51 vm00 ceph-mon[94470]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:28:51.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:51 vm00 ceph-mon[96293]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:28:51.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:51 vm08 ceph-mon[82639]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:28:52.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:52 vm00 ceph-mon[94470]: pgmap v79: 161 pgs: 161 active+clean; 457 KiB data, 184 MiB used, 160 GiB / 160 GiB avail; 163 B/s rd, 0 op/s 2026-03-10T13:28:52.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:52 vm00 ceph-mon[94470]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 43/627 objects degraded (6.858%), 12 pgs degraded) 2026-03-10T13:28:52.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:52 vm00 ceph-mon[94470]: Cluster is now healthy 2026-03-10T13:28:52.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:52 vm00 ceph-mon[96293]: pgmap v79: 161 pgs: 161 active+clean; 457 KiB data, 184 MiB used, 160 GiB / 160 GiB avail; 163 B/s rd, 0 op/s 2026-03-10T13:28:52.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:52 vm00 ceph-mon[96293]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 43/627 objects degraded (6.858%), 12 pgs degraded) 2026-03-10T13:28:52.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:52 vm00 ceph-mon[96293]: Cluster 
is now healthy 2026-03-10T13:28:52.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:52 vm08 ceph-mon[82639]: pgmap v79: 161 pgs: 161 active+clean; 457 KiB data, 184 MiB used, 160 GiB / 160 GiB avail; 163 B/s rd, 0 op/s 2026-03-10T13:28:52.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:52 vm08 ceph-mon[82639]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 43/627 objects degraded (6.858%), 12 pgs degraded) 2026-03-10T13:28:52.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:52 vm08 ceph-mon[82639]: Cluster is now healthy 2026-03-10T13:28:54.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:54 vm00 ceph-mon[94470]: pgmap v80: 161 pgs: 161 active+clean; 457 KiB data, 184 MiB used, 160 GiB / 160 GiB avail; 127 B/s rd, 0 op/s 2026-03-10T13:28:54.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:54 vm00 ceph-mon[96293]: pgmap v80: 161 pgs: 161 active+clean; 457 KiB data, 184 MiB used, 160 GiB / 160 GiB avail; 127 B/s rd, 0 op/s 2026-03-10T13:28:54.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:54 vm08 ceph-mon[82639]: pgmap v80: 161 pgs: 161 active+clean; 457 KiB data, 184 MiB used, 160 GiB / 160 GiB avail; 127 B/s rd, 0 op/s 2026-03-10T13:28:56.002 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:28:55 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:28:55] "GET /metrics HTTP/1.1" 200 37750 "" "Prometheus/2.51.0" 2026-03-10T13:28:56.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:56 vm00 ceph-mon[94470]: pgmap v81: 161 pgs: 161 active+clean; 457 KiB data, 184 MiB used, 160 GiB / 160 GiB avail; 667 B/s rd, 0 op/s 2026-03-10T13:28:56.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:56 vm00 ceph-mon[96293]: pgmap v81: 161 pgs: 161 active+clean; 457 KiB data, 184 MiB used, 160 GiB / 160 GiB avail; 667 B/s rd, 0 op/s 2026-03-10T13:28:56.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:56 vm08 ceph-mon[82639]: pgmap v81: 161 pgs: 161 active+clean; 457 KiB data, 184 MiB used, 160 GiB / 160 GiB avail; 667 B/s rd, 0 op/s 2026-03-10T13:28:57.252 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:28:56 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:28:56.999Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:28:57.252 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:28:57 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:28:57.000Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:28:58.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:58 vm00 ceph-mon[94470]: pgmap v82: 161 pgs: 161 active+clean; 457 KiB data, 184 MiB used, 160 GiB / 160 GiB avail; 601 B/s rd, 0 op/s 2026-03-10T13:28:58.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:58 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:58.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:58 vm00 ceph-mon[94470]: from='mgr.44106 
192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:28:58.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:28:58 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:58.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:58 vm00 ceph-mon[96293]: pgmap v82: 161 pgs: 161 active+clean; 457 KiB data, 184 MiB used, 160 GiB / 160 GiB avail; 601 B/s rd, 0 op/s 2026-03-10T13:28:58.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:58 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:58.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:58 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:28:58.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:28:58 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:58.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:58 vm08 ceph-mon[82639]: pgmap v82: 161 pgs: 161 active+clean; 457 KiB data, 184 MiB used, 160 GiB / 160 GiB avail; 601 B/s rd, 0 op/s 2026-03-10T13:28:58.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:58 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:28:58.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:58 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:28:58.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:28:58 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:00.663 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:00 vm08 ceph-mon[82639]: pgmap v83: 161 pgs: 161 active+clean; 457 KiB data, 184 MiB used, 160 GiB / 160 GiB avail; 511 B/s rd, 0 op/s 2026-03-10T13:29:00.663 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:00 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["4"], "max": 16}]: dispatch 2026-03-10T13:29:00.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:00 vm00 ceph-mon[94470]: pgmap v83: 161 pgs: 161 active+clean; 457 KiB data, 184 MiB used, 160 GiB / 160 GiB avail; 511 B/s rd, 0 op/s 2026-03-10T13:29:00.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:00 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["4"], "max": 16}]: dispatch 2026-03-10T13:29:00.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:00 vm00 ceph-mon[96293]: pgmap v83: 161 pgs: 161 active+clean; 457 KiB data, 184 MiB used, 160 GiB / 160 GiB avail; 511 B/s rd, 0 op/s 2026-03-10T13:29:00.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:00 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["4"], "max": 16}]: dispatch 2026-03-10T13:29:01.241 INFO:journalctl@ceph.osd.4.vm08.stdout:Mar 10 13:29:01 vm08 systemd[1]: Stopping Ceph osd.4 for 98a3dada-1c81-11f1-89c9-d57c120f78d5... 
2026-03-10T13:29:01.508 INFO:journalctl@ceph.osd.4.vm08.stdout:Mar 10 13:29:01 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-4[53134]: 2026-03-10T13:29:01.238+0000 7fde15fab700 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.4 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-10T13:29:01.508 INFO:journalctl@ceph.osd.4.vm08.stdout:Mar 10 13:29:01 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-4[53134]: 2026-03-10T13:29:01.238+0000 7fde15fab700 -1 osd.4 114 *** Got signal Terminated *** 2026-03-10T13:29:01.509 INFO:journalctl@ceph.osd.4.vm08.stdout:Mar 10 13:29:01 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-4[53134]: 2026-03-10T13:29:01.238+0000 7fde15fab700 -1 osd.4 114 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-10T13:29:01.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:01 vm08 ceph-mon[82639]: from='mon.2 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["4"], "max": 16}]: dispatch 2026-03-10T13:29:01.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:01 vm08 ceph-mon[82639]: Upgrade: osd.4 is safe to restart 2026-03-10T13:29:01.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:01 vm08 ceph-mon[82639]: Upgrade: Updating osd.4 2026-03-10T13:29:01.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:01 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:01.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:01 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.4"}]: dispatch 2026-03-10T13:29:01.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:01 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:29:01.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:01 vm08 ceph-mon[82639]: Deploying daemon osd.4 on vm08 2026-03-10T13:29:01.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:01 vm08 ceph-mon[82639]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:29:01.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:01 vm08 ceph-mon[82639]: osd.4 marked itself down and dead 2026-03-10T13:29:01.771 INFO:journalctl@ceph.osd.4.vm08.stdout:Mar 10 13:29:01 vm08 podman[90356]: 2026-03-10 13:29:01.561609875 +0000 UTC m=+0.334756655 container died e349440ca776385e57ee7d64a83c2aad52e4b6a96ea949222f8ce3ead7071de0 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-4, io.buildah.version=1.19.8, com.redhat.component=centos-stream-container, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, vcs-type=git, io.k8s.display-name=CentOS Stream 8, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, CEPH_POINT_RELEASE=-17.2.0, ceph=True, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. 
This image takes the Red Hat UBI and layers on content from CentOS Stream, version=8, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, io.openshift.expose-services=, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., release=754, architecture=x86_64, io.openshift.tags=base centos centos-stream, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, distribution-scope=public, maintainer=Guillaume Abrioux , name=centos-stream, GIT_CLEAN=True, vendor=Red Hat, Inc., GIT_REPO=https://github.com/ceph/ceph-container.git, build-date=2022-05-03T08:36:31.336870, GIT_BRANCH=HEAD, RELEASE=HEAD) 2026-03-10T13:29:01.771 INFO:journalctl@ceph.osd.4.vm08.stdout:Mar 10 13:29:01 vm08 podman[90356]: 2026-03-10 13:29:01.592460576 +0000 UTC m=+0.365607356 container remove e349440ca776385e57ee7d64a83c2aad52e4b6a96ea949222f8ce3ead7071de0 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-4, vcs-type=git, io.openshift.tags=base centos centos-stream, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, io.buildah.version=1.19.8, io.k8s.display-name=CentOS Stream 8, io.openshift.expose-services=, ceph=True, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, vendor=Red Hat, Inc., com.redhat.component=centos-stream-container, GIT_CLEAN=True, GIT_REPO=https://github.com/ceph/ceph-container.git, build-date=2022-05-03T08:36:31.336870, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., maintainer=Guillaume Abrioux , architecture=x86_64, distribution-scope=public, name=centos-stream, release=754, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, GIT_BRANCH=HEAD, RELEASE=HEAD, version=8, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, CEPH_POINT_RELEASE=-17.2.0) 2026-03-10T13:29:01.771 INFO:journalctl@ceph.osd.4.vm08.stdout:Mar 10 13:29:01 vm08 bash[90356]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-4 2026-03-10T13:29:01.771 INFO:journalctl@ceph.osd.4.vm08.stdout:Mar 10 13:29:01 vm08 podman[90424]: 2026-03-10 13:29:01.756396383 +0000 UTC m=+0.014568341 container create c747dcbde135e86f2866a37aec2bbd439ee6c47c5b5580c4ff3c57df011010e5 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-4-deactivate, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, org.label-schema.build-date=20260223, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team , ceph=True, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/) 2026-03-10T13:29:02.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:01 vm00 ceph-mon[96293]: from='mon.2 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["4"], "max": 16}]: dispatch 2026-03-10T13:29:02.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:01 vm00 ceph-mon[96293]: Upgrade: osd.4 is safe to restart 2026-03-10T13:29:02.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:01 vm00 ceph-mon[96293]: Upgrade: Updating osd.4 2026-03-10T13:29:02.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:01 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:02.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:01 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.4"}]: dispatch 2026-03-10T13:29:02.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:01 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:29:02.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:01 vm00 ceph-mon[96293]: Deploying daemon osd.4 on vm08 2026-03-10T13:29:02.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:01 vm00 ceph-mon[96293]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:29:02.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:01 vm00 ceph-mon[96293]: osd.4 marked itself down and dead 2026-03-10T13:29:02.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:01 vm00 ceph-mon[94470]: from='mon.2 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["4"], "max": 16}]: dispatch 2026-03-10T13:29:02.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:01 vm00 ceph-mon[94470]: Upgrade: osd.4 is safe to restart 2026-03-10T13:29:02.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:01 vm00 ceph-mon[94470]: Upgrade: Updating osd.4 2026-03-10T13:29:02.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:01 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:02.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:01 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.4"}]: dispatch 2026-03-10T13:29:02.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:01 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:29:02.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:01 vm00 ceph-mon[94470]: Deploying daemon osd.4 on vm08 2026-03-10T13:29:02.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:01 vm00 ceph-mon[94470]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:29:02.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:01 vm00 ceph-mon[94470]: osd.4 marked itself down and dead 2026-03-10T13:29:02.028 INFO:journalctl@ceph.osd.4.vm08.stdout:Mar 10 13:29:01 vm08 podman[90424]: 2026-03-10 13:29:01.792273804 +0000 UTC m=+0.050445762 container init c747dcbde135e86f2866a37aec2bbd439ee6c47c5b5580c4ff3c57df011010e5 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-4-deactivate, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, OSD_FLAVOR=default, org.label-schema.build-date=20260223, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0) 2026-03-10T13:29:02.028 INFO:journalctl@ceph.osd.4.vm08.stdout:Mar 10 13:29:01 vm08 podman[90424]: 2026-03-10 13:29:01.794778643 +0000 UTC m=+0.052950591 container start c747dcbde135e86f2866a37aec2bbd439ee6c47c5b5580c4ff3c57df011010e5 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-4-deactivate, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team , ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, CEPH_REF=squid, OSD_FLAVOR=default) 2026-03-10T13:29:02.028 INFO:journalctl@ceph.osd.4.vm08.stdout:Mar 10 13:29:01 vm08 podman[90424]: 2026-03-10 13:29:01.798320394 +0000 UTC 
m=+0.056492352 container attach c747dcbde135e86f2866a37aec2bbd439ee6c47c5b5580c4ff3c57df011010e5 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-4-deactivate, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223) 2026-03-10T13:29:02.028 INFO:journalctl@ceph.osd.4.vm08.stdout:Mar 10 13:29:01 vm08 podman[90424]: 2026-03-10 13:29:01.750898282 +0000 UTC m=+0.009070241 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:29:02.028 INFO:journalctl@ceph.osd.4.vm08.stdout:Mar 10 13:29:01 vm08 podman[90424]: 2026-03-10 13:29:01.918727338 +0000 UTC m=+0.176899296 container died c747dcbde135e86f2866a37aec2bbd439ee6c47c5b5580c4ff3c57df011010e5 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-4-deactivate, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, ceph=True, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df) 2026-03-10T13:29:02.028 INFO:journalctl@ceph.osd.4.vm08.stdout:Mar 10 13:29:01 vm08 podman[90424]: 2026-03-10 13:29:01.938500001 +0000 UTC m=+0.196671959 container remove c747dcbde135e86f2866a37aec2bbd439ee6c47c5b5580c4ff3c57df011010e5 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-4-deactivate, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS, CEPH_REF=squid, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, io.buildah.version=1.41.3, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team ) 2026-03-10T13:29:02.028 INFO:journalctl@ceph.osd.4.vm08.stdout:Mar 10 13:29:01 vm08 systemd[1]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@osd.4.service: Deactivated successfully. 2026-03-10T13:29:02.028 INFO:journalctl@ceph.osd.4.vm08.stdout:Mar 10 13:29:01 vm08 systemd[1]: Stopped Ceph osd.4 for 98a3dada-1c81-11f1-89c9-d57c120f78d5. 
2026-03-10T13:29:02.028 INFO:journalctl@ceph.osd.4.vm08.stdout:Mar 10 13:29:01 vm08 systemd[1]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@osd.4.service: Consumed 22.706s CPU time. 2026-03-10T13:29:02.526 INFO:journalctl@ceph.osd.4.vm08.stdout:Mar 10 13:29:02 vm08 systemd[1]: Starting Ceph osd.4 for 98a3dada-1c81-11f1-89c9-d57c120f78d5... 2026-03-10T13:29:02.526 INFO:journalctl@ceph.osd.4.vm08.stdout:Mar 10 13:29:02 vm08 podman[90527]: 2026-03-10 13:29:02.20679376 +0000 UTC m=+0.016654167 container create 986ee6aaa67fcb36af873d38811ef2c4ba3f0b5963e71c49d5a2985eef2bd082 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-4-activate, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223, CEPH_REF=squid, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, ceph=True) 2026-03-10T13:29:02.526 INFO:journalctl@ceph.osd.4.vm08.stdout:Mar 10 13:29:02 vm08 podman[90527]: 2026-03-10 13:29:02.25100443 +0000 UTC m=+0.060864856 container init 986ee6aaa67fcb36af873d38811ef2c4ba3f0b5963e71c49d5a2985eef2bd082 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-4-activate, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, ceph=True) 2026-03-10T13:29:02.526 INFO:journalctl@ceph.osd.4.vm08.stdout:Mar 10 13:29:02 vm08 podman[90527]: 2026-03-10 13:29:02.253746453 +0000 UTC m=+0.063606869 container start 986ee6aaa67fcb36af873d38811ef2c4ba3f0b5963e71c49d5a2985eef2bd082 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-4-activate, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, CEPH_REF=squid) 2026-03-10T13:29:02.526 INFO:journalctl@ceph.osd.4.vm08.stdout:Mar 10 13:29:02 vm08 podman[90527]: 2026-03-10 13:29:02.256404659 +0000 UTC m=+0.066265065 container attach 
986ee6aaa67fcb36af873d38811ef2c4ba3f0b5963e71c49d5a2985eef2bd082 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-4-activate, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, CEPH_REF=squid) 2026-03-10T13:29:02.526 INFO:journalctl@ceph.osd.4.vm08.stdout:Mar 10 13:29:02 vm08 podman[90527]: 2026-03-10 13:29:02.199634228 +0000 UTC m=+0.009494644 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:29:02.526 INFO:journalctl@ceph.osd.4.vm08.stdout:Mar 10 13:29:02 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-4-activate[90539]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T13:29:02.526 INFO:journalctl@ceph.osd.4.vm08.stdout:Mar 10 13:29:02 vm08 bash[90527]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T13:29:02.526 INFO:journalctl@ceph.osd.4.vm08.stdout:Mar 10 13:29:02 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-4-activate[90539]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T13:29:02.526 INFO:journalctl@ceph.osd.4.vm08.stdout:Mar 10 13:29:02 vm08 bash[90527]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T13:29:02.871 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:02 vm08 ceph-mon[82639]: pgmap v84: 161 pgs: 161 active+clean; 457 KiB data, 184 MiB used, 160 GiB / 160 GiB avail; 938 B/s rd, 0 op/s 2026-03-10T13:29:02.871 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:02 vm08 ceph-mon[82639]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-10T13:29:02.871 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:02 vm08 ceph-mon[82639]: osdmap e115: 8 total, 7 up, 8 in 2026-03-10T13:29:02.872 INFO:journalctl@ceph.osd.4.vm08.stdout:Mar 10 13:29:02 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-4-activate[90539]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-10T13:29:03.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:02 vm00 ceph-mon[94470]: pgmap v84: 161 pgs: 161 active+clean; 457 KiB data, 184 MiB used, 160 GiB / 160 GiB avail; 938 B/s rd, 0 op/s 2026-03-10T13:29:03.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:02 vm00 ceph-mon[94470]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-10T13:29:03.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:02 vm00 ceph-mon[94470]: osdmap e115: 8 total, 7 up, 8 in 2026-03-10T13:29:03.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:02 vm00 ceph-mon[96293]: pgmap v84: 161 pgs: 161 active+clean; 457 KiB data, 184 MiB used, 160 GiB / 160 GiB avail; 938 B/s rd, 0 op/s 2026-03-10T13:29:03.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:02 vm00 ceph-mon[96293]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-10T13:29:03.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:02 vm00 ceph-mon[96293]: osdmap e115: 8 total, 7 up, 8 in 
2026-03-10T13:29:03.204 INFO:journalctl@ceph.osd.4.vm08.stdout:Mar 10 13:29:02 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-4-activate[90539]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T13:29:03.204 INFO:journalctl@ceph.osd.4.vm08.stdout:Mar 10 13:29:02 vm08 bash[90527]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-10T13:29:03.204 INFO:journalctl@ceph.osd.4.vm08.stdout:Mar 10 13:29:02 vm08 bash[90527]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T13:29:03.204 INFO:journalctl@ceph.osd.4.vm08.stdout:Mar 10 13:29:02 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-4-activate[90539]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T13:29:03.204 INFO:journalctl@ceph.osd.4.vm08.stdout:Mar 10 13:29:02 vm08 bash[90527]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T13:29:03.204 INFO:journalctl@ceph.osd.4.vm08.stdout:Mar 10 13:29:02 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-4-activate[90539]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-4 2026-03-10T13:29:03.204 INFO:journalctl@ceph.osd.4.vm08.stdout:Mar 10 13:29:02 vm08 bash[90527]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-4 2026-03-10T13:29:03.204 INFO:journalctl@ceph.osd.4.vm08.stdout:Mar 10 13:29:02 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-4-activate[90539]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-f7998813-6a58-4b81-b60b-3f8b4487743a/osd-block-2ec681f9-baf2-471e-8b59-1a1b47be1367 --path /var/lib/ceph/osd/ceph-4 --no-mon-config 2026-03-10T13:29:03.204 INFO:journalctl@ceph.osd.4.vm08.stdout:Mar 10 13:29:02 vm08 bash[90527]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-f7998813-6a58-4b81-b60b-3f8b4487743a/osd-block-2ec681f9-baf2-471e-8b59-1a1b47be1367 --path /var/lib/ceph/osd/ceph-4 --no-mon-config 2026-03-10T13:29:03.204 INFO:journalctl@ceph.osd.4.vm08.stdout:Mar 10 13:29:03 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-4-activate[90539]: Running command: /usr/bin/ln -snf /dev/ceph-f7998813-6a58-4b81-b60b-3f8b4487743a/osd-block-2ec681f9-baf2-471e-8b59-1a1b47be1367 /var/lib/ceph/osd/ceph-4/block 2026-03-10T13:29:03.204 INFO:journalctl@ceph.osd.4.vm08.stdout:Mar 10 13:29:03 vm08 bash[90527]: Running command: /usr/bin/ln -snf /dev/ceph-f7998813-6a58-4b81-b60b-3f8b4487743a/osd-block-2ec681f9-baf2-471e-8b59-1a1b47be1367 /var/lib/ceph/osd/ceph-4/block 2026-03-10T13:29:03.521 INFO:journalctl@ceph.osd.4.vm08.stdout:Mar 10 13:29:03 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-4-activate[90539]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-4/block 2026-03-10T13:29:03.521 INFO:journalctl@ceph.osd.4.vm08.stdout:Mar 10 13:29:03 vm08 bash[90527]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-4/block 2026-03-10T13:29:03.521 INFO:journalctl@ceph.osd.4.vm08.stdout:Mar 10 13:29:03 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-4-activate[90539]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-0 2026-03-10T13:29:03.521 INFO:journalctl@ceph.osd.4.vm08.stdout:Mar 10 13:29:03 vm08 bash[90527]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-0 2026-03-10T13:29:03.521 INFO:journalctl@ceph.osd.4.vm08.stdout:Mar 10 13:29:03 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-4-activate[90539]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-4 2026-03-10T13:29:03.521 
INFO:journalctl@ceph.osd.4.vm08.stdout:Mar 10 13:29:03 vm08 bash[90527]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-4 2026-03-10T13:29:03.521 INFO:journalctl@ceph.osd.4.vm08.stdout:Mar 10 13:29:03 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-4-activate[90539]: --> ceph-volume lvm activate successful for osd ID: 4 2026-03-10T13:29:03.521 INFO:journalctl@ceph.osd.4.vm08.stdout:Mar 10 13:29:03 vm08 bash[90527]: --> ceph-volume lvm activate successful for osd ID: 4 2026-03-10T13:29:03.521 INFO:journalctl@ceph.osd.4.vm08.stdout:Mar 10 13:29:03 vm08 conmon[90539]: conmon 986ee6aaa67fcb36af87 : Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-986ee6aaa67fcb36af873d38811ef2c4ba3f0b5963e71c49d5a2985eef2bd082.scope/container/memory.events 2026-03-10T13:29:03.521 INFO:journalctl@ceph.osd.4.vm08.stdout:Mar 10 13:29:03 vm08 podman[90527]: 2026-03-10 13:29:03.229101028 +0000 UTC m=+1.038961433 container died 986ee6aaa67fcb36af873d38811ef2c4ba3f0b5963e71c49d5a2985eef2bd082 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-4-activate, CEPH_REF=squid, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default) 2026-03-10T13:29:03.521 INFO:journalctl@ceph.osd.4.vm08.stdout:Mar 10 13:29:03 vm08 podman[90527]: 2026-03-10 13:29:03.246927067 +0000 UTC m=+1.056787483 container remove 986ee6aaa67fcb36af873d38811ef2c4ba3f0b5963e71c49d5a2985eef2bd082 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-4-activate, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_REF=squid, org.label-schema.vendor=CentOS, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2) 2026-03-10T13:29:03.521 INFO:journalctl@ceph.osd.4.vm08.stdout:Mar 10 13:29:03 vm08 podman[90794]: 2026-03-10 13:29:03.330173129 +0000 UTC m=+0.017721746 container create 5b92674798b7c7880e9cc0b97f4dcb5ee20701cae3f8b0c51de22a9e7918ec38 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-4, OSD_FLAVOR=default, CEPH_REF=squid, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , ceph=True, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, 
GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df) 2026-03-10T13:29:03.521 INFO:journalctl@ceph.osd.4.vm08.stdout:Mar 10 13:29:03 vm08 podman[90794]: 2026-03-10 13:29:03.363293959 +0000 UTC m=+0.050842566 container init 5b92674798b7c7880e9cc0b97f4dcb5ee20701cae3f8b0c51de22a9e7918ec38 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-4, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.build-date=20260223, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, ceph=True, org.label-schema.schema-version=1.0) 2026-03-10T13:29:03.521 INFO:journalctl@ceph.osd.4.vm08.stdout:Mar 10 13:29:03 vm08 podman[90794]: 2026-03-10 13:29:03.36548455 +0000 UTC m=+0.053033167 container start 5b92674798b7c7880e9cc0b97f4dcb5ee20701cae3f8b0c51de22a9e7918ec38 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-4, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team , ceph=True, org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, io.buildah.version=1.41.3, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/) 2026-03-10T13:29:03.521 INFO:journalctl@ceph.osd.4.vm08.stdout:Mar 10 13:29:03 vm08 bash[90794]: 5b92674798b7c7880e9cc0b97f4dcb5ee20701cae3f8b0c51de22a9e7918ec38 2026-03-10T13:29:03.521 INFO:journalctl@ceph.osd.4.vm08.stdout:Mar 10 13:29:03 vm08 podman[90794]: 2026-03-10 13:29:03.32202212 +0000 UTC m=+0.009570737 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:29:03.521 INFO:journalctl@ceph.osd.4.vm08.stdout:Mar 10 13:29:03 vm08 systemd[1]: Started Ceph osd.4 for 98a3dada-1c81-11f1-89c9-d57c120f78d5. 
2026-03-10T13:29:03.888 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:03 vm08 ceph-mon[82639]: osdmap e116: 8 total, 7 up, 8 in 2026-03-10T13:29:03.888 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:03 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:03.888 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:03 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:03.888 INFO:journalctl@ceph.osd.4.vm08.stdout:Mar 10 13:29:03 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-4[90804]: 2026-03-10T13:29:03.700+0000 7f3df7c82740 -1 Falling back to public interface 2026-03-10T13:29:04.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:03 vm00 ceph-mon[94470]: osdmap e116: 8 total, 7 up, 8 in 2026-03-10T13:29:04.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:03 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:04.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:03 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:04.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:03 vm00 ceph-mon[96293]: osdmap e116: 8 total, 7 up, 8 in 2026-03-10T13:29:04.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:03 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:04.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:03 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:04.502 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:29:04 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:29:04.148Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:29:04.520 INFO:journalctl@ceph.osd.4.vm08.stdout:Mar 10 13:29:04 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-4[90804]: 2026-03-10T13:29:04.308+0000 7f3df7c82740 -1 osd.4 0 read_superblock omap replica is missing. 
2026-03-10T13:29:04.520 INFO:journalctl@ceph.osd.4.vm08.stdout:Mar 10 13:29:04 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-4[90804]: 2026-03-10T13:29:04.322+0000 7f3df7c82740 -1 osd.4 114 log_to_monitors true 2026-03-10T13:29:05.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:04 vm00 ceph-mon[94470]: pgmap v87: 161 pgs: 23 stale+active+clean, 138 active+clean; 457 KiB data, 184 MiB used, 160 GiB / 160 GiB avail; 639 B/s rd, 0 op/s 2026-03-10T13:29:05.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:04 vm00 ceph-mon[94470]: from='osd.4 [v2:192.168.123.108:6800/2431038923,v1:192.168.123.108:6801/2431038923]' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-10T13:29:05.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:04 vm00 ceph-mon[94470]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-10T13:29:05.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:04 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:05.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:04 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:05.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:04 vm00 ceph-mon[96293]: pgmap v87: 161 pgs: 23 stale+active+clean, 138 active+clean; 457 KiB data, 184 MiB used, 160 GiB / 160 GiB avail; 639 B/s rd, 0 op/s 2026-03-10T13:29:05.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:04 vm00 ceph-mon[96293]: from='osd.4 [v2:192.168.123.108:6800/2431038923,v1:192.168.123.108:6801/2431038923]' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-10T13:29:05.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:04 vm00 ceph-mon[96293]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-10T13:29:05.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:04 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:05.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:04 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:05.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:04 vm08 ceph-mon[82639]: pgmap v87: 161 pgs: 23 stale+active+clean, 138 active+clean; 457 KiB data, 184 MiB used, 160 GiB / 160 GiB avail; 639 B/s rd, 0 op/s 2026-03-10T13:29:05.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:04 vm08 ceph-mon[82639]: from='osd.4 [v2:192.168.123.108:6800/2431038923,v1:192.168.123.108:6801/2431038923]' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-10T13:29:05.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:04 vm08 ceph-mon[82639]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-10T13:29:05.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:04 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:05.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:04 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:06.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:05 vm00 ceph-mon[94470]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]': finished 2026-03-10T13:29:06.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:05 vm00 
ceph-mon[94470]: osdmap e117: 8 total, 7 up, 8 in 2026-03-10T13:29:06.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:05 vm00 ceph-mon[94470]: from='osd.4 [v2:192.168.123.108:6800/2431038923,v1:192.168.123.108:6801/2431038923]' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm08", "root=default"]}]: dispatch 2026-03-10T13:29:06.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:05 vm00 ceph-mon[94470]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm08", "root=default"]}]: dispatch 2026-03-10T13:29:06.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:05 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:06.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:05 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:06.003 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:29:05 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:29:05] "GET /metrics HTTP/1.1" 200 37752 "" "Prometheus/2.51.0" 2026-03-10T13:29:06.003 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:29:05 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:29:05.769+0000 7f1445d54640 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (14 PGs are or would become offline) 2026-03-10T13:29:06.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:05 vm00 ceph-mon[96293]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]': finished 2026-03-10T13:29:06.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:05 vm00 ceph-mon[96293]: osdmap e117: 8 total, 7 up, 8 in 2026-03-10T13:29:06.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:05 vm00 ceph-mon[96293]: from='osd.4 [v2:192.168.123.108:6800/2431038923,v1:192.168.123.108:6801/2431038923]' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm08", "root=default"]}]: dispatch 2026-03-10T13:29:06.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:05 vm00 ceph-mon[96293]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm08", "root=default"]}]: dispatch 2026-03-10T13:29:06.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:05 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:06.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:05 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:06.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:05 vm08 ceph-mon[82639]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]': finished 2026-03-10T13:29:06.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:05 vm08 ceph-mon[82639]: osdmap e117: 8 total, 7 up, 8 in 2026-03-10T13:29:06.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:05 vm08 ceph-mon[82639]: from='osd.4 [v2:192.168.123.108:6800/2431038923,v1:192.168.123.108:6801/2431038923]' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm08", "root=default"]}]: dispatch 2026-03-10T13:29:06.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:05 vm08 ceph-mon[82639]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm08", "root=default"]}]: 
dispatch 2026-03-10T13:29:06.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:05 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:06.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:05 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:06.020 INFO:journalctl@ceph.osd.4.vm08.stdout:Mar 10 13:29:05 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-4[90804]: 2026-03-10T13:29:05.522+0000 7f3def22c640 -1 osd.4 114 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-10T13:29:07.001 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:06 vm00 ceph-mon[94470]: pgmap v89: 161 pgs: 2 peering, 37 active+undersized, 1 stale+active+clean, 25 active+undersized+degraded, 96 active+clean; 457 KiB data, 210 MiB used, 160 GiB / 160 GiB avail; 101/627 objects degraded (16.108%) 2026-03-10T13:29:07.001 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:06 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:07.001 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:06 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:07.001 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:06 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:29:07.001 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:06 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:29:07.001 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:06 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:07.001 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:06 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:29:07.001 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:06 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:29:07.001 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:06 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:29:07.001 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:06 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:29:07.001 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:06 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["5"], "max": 16}]: dispatch 2026-03-10T13:29:07.001 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:06 vm00 ceph-mon[94470]: from='mon.2 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["5"], "max": 16}]: dispatch 2026-03-10T13:29:07.001 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:06 vm00 ceph-mon[94470]: Upgrade: unsafe to stop osd(s) at this time (14 PGs are or would become offline) 2026-03-10T13:29:07.001 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:06 vm00 ceph-mon[94470]: Health check failed: Degraded data redundancy: 101/627 objects degraded (16.108%), 25 pgs degraded (PG_DEGRADED) 2026-03-10T13:29:07.001 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:06 vm00 ceph-mon[94470]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-10T13:29:07.001 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:06 vm00 ceph-mon[94470]: Cluster is now healthy 2026-03-10T13:29:07.001 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:06 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T13:29:07.001 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:06 vm00 ceph-mon[94470]: osd.4 [v2:192.168.123.108:6800/2431038923,v1:192.168.123.108:6801/2431038923] boot 2026-03-10T13:29:07.001 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:06 vm00 ceph-mon[94470]: osdmap e118: 8 total, 8 up, 8 in 2026-03-10T13:29:07.001 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:06 vm00 ceph-mon[96293]: pgmap v89: 161 pgs: 2 peering, 37 active+undersized, 1 stale+active+clean, 25 active+undersized+degraded, 96 active+clean; 457 KiB data, 210 MiB used, 160 GiB / 160 GiB avail; 101/627 objects degraded (16.108%) 2026-03-10T13:29:07.001 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:06 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:07.001 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:06 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:07.001 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:06 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:29:07.001 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:06 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:29:07.001 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:06 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:07.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:06 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:29:07.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:06 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:29:07.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:06 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:29:07.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:06 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:29:07.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:06 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["5"], "max": 16}]: dispatch 2026-03-10T13:29:07.002 
INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:06 vm00 ceph-mon[96293]: from='mon.2 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["5"], "max": 16}]: dispatch 2026-03-10T13:29:07.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:06 vm00 ceph-mon[96293]: Upgrade: unsafe to stop osd(s) at this time (14 PGs are or would become offline) 2026-03-10T13:29:07.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:06 vm00 ceph-mon[96293]: Health check failed: Degraded data redundancy: 101/627 objects degraded (16.108%), 25 pgs degraded (PG_DEGRADED) 2026-03-10T13:29:07.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:06 vm00 ceph-mon[96293]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-10T13:29:07.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:06 vm00 ceph-mon[96293]: Cluster is now healthy 2026-03-10T13:29:07.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:06 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T13:29:07.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:06 vm00 ceph-mon[96293]: osd.4 [v2:192.168.123.108:6800/2431038923,v1:192.168.123.108:6801/2431038923] boot 2026-03-10T13:29:07.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:06 vm00 ceph-mon[96293]: osdmap e118: 8 total, 8 up, 8 in 2026-03-10T13:29:07.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:06 vm08 ceph-mon[82639]: pgmap v89: 161 pgs: 2 peering, 37 active+undersized, 1 stale+active+clean, 25 active+undersized+degraded, 96 active+clean; 457 KiB data, 210 MiB used, 160 GiB / 160 GiB avail; 101/627 objects degraded (16.108%) 2026-03-10T13:29:07.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:06 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:07.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:06 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:07.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:06 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:29:07.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:06 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:29:07.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:06 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:07.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:06 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:29:07.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:06 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:29:07.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:06 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:29:07.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:06 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:29:07.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:06 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd 
ok-to-stop", "ids": ["5"], "max": 16}]: dispatch 2026-03-10T13:29:07.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:06 vm08 ceph-mon[82639]: from='mon.2 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["5"], "max": 16}]: dispatch 2026-03-10T13:29:07.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:06 vm08 ceph-mon[82639]: Upgrade: unsafe to stop osd(s) at this time (14 PGs are or would become offline) 2026-03-10T13:29:07.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:06 vm08 ceph-mon[82639]: Health check failed: Degraded data redundancy: 101/627 objects degraded (16.108%), 25 pgs degraded (PG_DEGRADED) 2026-03-10T13:29:07.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:06 vm08 ceph-mon[82639]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-10T13:29:07.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:06 vm08 ceph-mon[82639]: Cluster is now healthy 2026-03-10T13:29:07.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:06 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T13:29:07.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:06 vm08 ceph-mon[82639]: osd.4 [v2:192.168.123.108:6800/2431038923,v1:192.168.123.108:6801/2431038923] boot 2026-03-10T13:29:07.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:06 vm08 ceph-mon[82639]: osdmap e118: 8 total, 8 up, 8 in 2026-03-10T13:29:07.252 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:29:07 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:29:07.000Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:29:07.252 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:29:07 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:29:07.001Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:29:08.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:07 vm00 ceph-mon[94470]: OSD bench result of 29735.228014 IOPS is not within the threshold limit range of 50.000000 IOPS and 500.000000 IOPS for osd.4. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd]. 2026-03-10T13:29:08.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:07 vm00 ceph-mon[94470]: osdmap e119: 8 total, 8 up, 8 in 2026-03-10T13:29:08.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:07 vm00 ceph-mon[96293]: OSD bench result of 29735.228014 IOPS is not within the threshold limit range of 50.000000 IOPS and 500.000000 IOPS for osd.4. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd]. 
2026-03-10T13:29:08.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:07 vm00 ceph-mon[96293]: osdmap e119: 8 total, 8 up, 8 in 2026-03-10T13:29:08.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:07 vm08 ceph-mon[82639]: OSD bench result of 29735.228014 IOPS is not within the threshold limit range of 50.000000 IOPS and 500.000000 IOPS for osd.4. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd]. 2026-03-10T13:29:08.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:07 vm08 ceph-mon[82639]: osdmap e119: 8 total, 8 up, 8 in 2026-03-10T13:29:09.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:08 vm00 ceph-mon[96293]: pgmap v92: 161 pgs: 41 active+undersized, 27 active+undersized+degraded, 93 active+clean; 457 KiB data, 210 MiB used, 160 GiB / 160 GiB avail; 107/627 objects degraded (17.065%) 2026-03-10T13:29:09.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:08 vm00 ceph-mon[94470]: pgmap v92: 161 pgs: 41 active+undersized, 27 active+undersized+degraded, 93 active+clean; 457 KiB data, 210 MiB used, 160 GiB / 160 GiB avail; 107/627 objects degraded (17.065%) 2026-03-10T13:29:09.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:08 vm08 ceph-mon[82639]: pgmap v92: 161 pgs: 41 active+undersized, 27 active+undersized+degraded, 93 active+clean; 457 KiB data, 210 MiB used, 160 GiB / 160 GiB avail; 107/627 objects degraded (17.065%) 2026-03-10T13:29:10.763 INFO:teuthology.orchestra.run.vm00.stdout:true 2026-03-10T13:29:11.025 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:10 vm00 ceph-mon[94470]: pgmap v93: 161 pgs: 25 active+undersized, 12 active+undersized+degraded, 124 active+clean; 457 KiB data, 211 MiB used, 160 GiB / 160 GiB avail; 40/627 objects degraded (6.380%) 2026-03-10T13:29:11.026 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:10 vm00 ceph-mon[96293]: pgmap v93: 161 pgs: 25 active+undersized, 12 active+undersized+degraded, 124 active+clean; 457 KiB data, 211 MiB used, 160 GiB / 160 GiB avail; 40/627 objects degraded (6.380%) 2026-03-10T13:29:11.164 INFO:teuthology.orchestra.run.vm00.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T13:29:11.164 INFO:teuthology.orchestra.run.vm00.stdout:alertmanager.a vm00 *:9093,9094 running (15m) 27s ago 22m 26.4M - 0.25.0 c8568f914cd2 12fde3cf83cb 2026-03-10T13:29:11.164 INFO:teuthology.orchestra.run.vm00.stdout:grafana.a vm08 *:3000 running (3m) 6s ago 21m 79.3M - 10.4.0 c8b91775d855 960e32589e98 2026-03-10T13:29:11.164 INFO:teuthology.orchestra.run.vm00.stdout:iscsi.foo.vm00.dezodo vm00 running (3m) 27s ago 21m 49.4M - 3.5 e1d6a67b021e 630bf6d4e7f3 2026-03-10T13:29:11.164 INFO:teuthology.orchestra.run.vm00.stdout:mgr.x vm08 *:8443,9283,8765 running (3m) 6s ago 23m 488M - 19.2.3-678-ge911bdeb 654f31e6858e 31b91eebc856 2026-03-10T13:29:11.164 INFO:teuthology.orchestra.run.vm00.stdout:mgr.y vm00 *:8443,9283,8765 running (10m) 27s ago 24m 555M - 19.2.3-678-ge911bdeb 654f31e6858e 5bc576d4d32b 2026-03-10T13:29:11.164 INFO:teuthology.orchestra.run.vm00.stdout:mon.a vm00 running (2m) 27s ago 24m 49.3M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 981df6371890 2026-03-10T13:29:11.164 INFO:teuthology.orchestra.run.vm00.stdout:mon.b vm08 running (2m) 6s ago 24m 46.3M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 8cceb678a9ee 2026-03-10T13:29:11.164 INFO:teuthology.orchestra.run.vm00.stdout:mon.c vm00 running (2m) 27s ago 24m 
40.0M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 43deda66dee3 2026-03-10T13:29:11.164 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.a vm00 *:9100 running (15m) 27s ago 22m 10.8M - 1.7.0 72c9c2088986 bcf883401619 2026-03-10T13:29:11.164 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.b vm08 *:9100 running (15m) 6s ago 22m 10.0M - 1.7.0 72c9c2088986 4ac83f03f818 2026-03-10T13:29:11.164 INFO:teuthology.orchestra.run.vm00.stdout:osd.0 vm00 running (99s) 27s ago 23m 46.9M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 5fc74f4d2179 2026-03-10T13:29:11.164 INFO:teuthology.orchestra.run.vm00.stdout:osd.1 vm00 running (92s) 27s ago 23m 68.7M 4096M 19.2.3-678-ge911bdeb 654f31e6858e dc65e199e9eb 2026-03-10T13:29:11.164 INFO:teuthology.orchestra.run.vm00.stdout:osd.2 vm00 running (63s) 27s ago 23m 66.7M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 706171e0f5c2 2026-03-10T13:29:11.164 INFO:teuthology.orchestra.run.vm00.stdout:osd.3 vm00 running (28s) 27s ago 23m 13.0M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 8739c77cf14d 2026-03-10T13:29:11.164 INFO:teuthology.orchestra.run.vm00.stdout:osd.4 vm08 running (7s) 6s ago 23m 15.3M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 5b92674798b7 2026-03-10T13:29:11.164 INFO:teuthology.orchestra.run.vm00.stdout:osd.5 vm08 running (22m) 6s ago 22m 60.4M 4096M 17.2.0 e1d6a67b021e 9a6a2a67cc99 2026-03-10T13:29:11.164 INFO:teuthology.orchestra.run.vm00.stdout:osd.6 vm08 running (22m) 6s ago 22m 55.8M 4096M 17.2.0 e1d6a67b021e c27676916d52 2026-03-10T13:29:11.164 INFO:teuthology.orchestra.run.vm00.stdout:osd.7 vm08 running (22m) 6s ago 22m 58.2M 4096M 17.2.0 e1d6a67b021e bf67951990a5 2026-03-10T13:29:11.164 INFO:teuthology.orchestra.run.vm00.stdout:prometheus.a vm08 *:9095 running (3m) 6s ago 22m 48.4M - 2.51.0 1d3b7f56885b 3f9b2d0821c9 2026-03-10T13:29:11.164 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm00.tvlvzo vm00 *:8000 running (21m) 27s ago 21m 98.1M - 17.2.0 e1d6a67b021e be24eac16807 2026-03-10T13:29:11.164 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm08.ljayps vm08 *:8000 running (21m) 6s ago 21m 95.5M - 17.2.0 e1d6a67b021e 67b525427823 2026-03-10T13:29:11.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:10 vm08 ceph-mon[82639]: pgmap v93: 161 pgs: 25 active+undersized, 12 active+undersized+degraded, 124 active+clean; 457 KiB data, 211 MiB used, 160 GiB / 160 GiB avail; 40/627 objects degraded (6.380%) 2026-03-10T13:29:11.396 INFO:teuthology.orchestra.run.vm00.stdout:{ 2026-03-10T13:29:11.396 INFO:teuthology.orchestra.run.vm00.stdout: "mon": { 2026-03-10T13:29:11.396 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 3 2026-03-10T13:29:11.396 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T13:29:11.396 INFO:teuthology.orchestra.run.vm00.stdout: "mgr": { 2026-03-10T13:29:11.396 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-10T13:29:11.396 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T13:29:11.396 INFO:teuthology.orchestra.run.vm00.stdout: "osd": { 2026-03-10T13:29:11.396 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 3, 2026-03-10T13:29:11.396 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 5 2026-03-10T13:29:11.396 INFO:teuthology.orchestra.run.vm00.stdout: }, 
2026-03-10T13:29:11.396 INFO:teuthology.orchestra.run.vm00.stdout: "rgw": { 2026-03-10T13:29:11.396 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 2 2026-03-10T13:29:11.396 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T13:29:11.396 INFO:teuthology.orchestra.run.vm00.stdout: "overall": { 2026-03-10T13:29:11.396 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 5, 2026-03-10T13:29:11.396 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 10 2026-03-10T13:29:11.396 INFO:teuthology.orchestra.run.vm00.stdout: } 2026-03-10T13:29:11.396 INFO:teuthology.orchestra.run.vm00.stdout:} 2026-03-10T13:29:11.594 INFO:teuthology.orchestra.run.vm00.stdout:{ 2026-03-10T13:29:11.594 INFO:teuthology.orchestra.run.vm00.stdout: "target_image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", 2026-03-10T13:29:11.594 INFO:teuthology.orchestra.run.vm00.stdout: "in_progress": true, 2026-03-10T13:29:11.594 INFO:teuthology.orchestra.run.vm00.stdout: "which": "Upgrading daemons of type(s) crash,osd", 2026-03-10T13:29:11.594 INFO:teuthology.orchestra.run.vm00.stdout: "services_complete": [], 2026-03-10T13:29:11.594 INFO:teuthology.orchestra.run.vm00.stdout: "progress": "5/8 daemons upgraded", 2026-03-10T13:29:11.594 INFO:teuthology.orchestra.run.vm00.stdout: "message": "Currently upgrading osd daemons", 2026-03-10T13:29:11.594 INFO:teuthology.orchestra.run.vm00.stdout: "is_paused": false 2026-03-10T13:29:11.594 INFO:teuthology.orchestra.run.vm00.stdout:} 2026-03-10T13:29:12.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:11 vm00 ceph-mon[96293]: from='client.54247 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:29:12.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:11 vm00 ceph-mon[96293]: from='client.44325 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:29:12.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:11 vm00 ceph-mon[96293]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:29:12.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:11 vm00 ceph-mon[96293]: from='client.? 192.168.123.100:0/2765455260' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:29:12.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:11 vm00 ceph-mon[94470]: from='client.54247 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:29:12.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:11 vm00 ceph-mon[94470]: from='client.44325 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:29:12.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:11 vm00 ceph-mon[94470]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:29:12.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:11 vm00 ceph-mon[94470]: from='client.? 
192.168.123.100:0/2765455260' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:29:12.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:11 vm08 ceph-mon[82639]: from='client.54247 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:29:12.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:11 vm08 ceph-mon[82639]: from='client.44325 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:29:12.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:11 vm08 ceph-mon[82639]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:29:12.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:11 vm08 ceph-mon[82639]: from='client.? 192.168.123.100:0/2765455260' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:29:13.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:12 vm00 ceph-mon[94470]: from='client.44331 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:29:13.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:12 vm00 ceph-mon[94470]: pgmap v94: 161 pgs: 161 active+clean; 457 KiB data, 611 MiB used, 159 GiB / 160 GiB avail; 303 B/s rd, 0 op/s; 0 B/s, 0 objects/s recovering 2026-03-10T13:29:13.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:12 vm00 ceph-mon[94470]: from='client.34346 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:29:13.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:12 vm00 ceph-mon[94470]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 40/627 objects degraded (6.380%), 12 pgs degraded) 2026-03-10T13:29:13.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:12 vm00 ceph-mon[94470]: Cluster is now healthy 2026-03-10T13:29:13.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:12 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:29:13.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:12 vm00 ceph-mon[96293]: from='client.44331 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:29:13.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:12 vm00 ceph-mon[96293]: pgmap v94: 161 pgs: 161 active+clean; 457 KiB data, 611 MiB used, 159 GiB / 160 GiB avail; 303 B/s rd, 0 op/s; 0 B/s, 0 objects/s recovering 2026-03-10T13:29:13.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:12 vm00 ceph-mon[96293]: from='client.34346 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:29:13.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:12 vm00 ceph-mon[96293]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 40/627 objects degraded (6.380%), 12 pgs degraded) 2026-03-10T13:29:13.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:12 vm00 ceph-mon[96293]: Cluster is now healthy 2026-03-10T13:29:13.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:12 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:29:13.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:12 vm08 ceph-mon[82639]: 
from='client.44331 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:29:13.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:12 vm08 ceph-mon[82639]: pgmap v94: 161 pgs: 161 active+clean; 457 KiB data, 611 MiB used, 159 GiB / 160 GiB avail; 303 B/s rd, 0 op/s; 0 B/s, 0 objects/s recovering 2026-03-10T13:29:13.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:12 vm08 ceph-mon[82639]: from='client.34346 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:29:13.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:12 vm08 ceph-mon[82639]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 40/627 objects degraded (6.380%), 12 pgs degraded) 2026-03-10T13:29:13.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:12 vm08 ceph-mon[82639]: Cluster is now healthy 2026-03-10T13:29:13.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:12 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:29:14.502 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:29:14 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:29:14.148Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=1 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:29:14.503 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:29:14 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:29:14.148Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:29:15.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:14 vm00 ceph-mon[94470]: pgmap v95: 161 pgs: 161 active+clean; 457 KiB data, 611 MiB used, 159 GiB / 160 GiB avail; 511 B/s rd, 0 op/s; 0 B/s, 0 objects/s recovering 2026-03-10T13:29:15.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:14 vm00 ceph-mon[96293]: pgmap v95: 161 pgs: 161 active+clean; 457 KiB data, 611 MiB used, 159 GiB / 160 GiB avail; 511 B/s rd, 0 op/s; 0 B/s, 0 objects/s recovering 2026-03-10T13:29:15.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:14 vm08 ceph-mon[82639]: pgmap v95: 161 pgs: 161 active+clean; 457 KiB data, 611 MiB used, 159 GiB / 160 GiB avail; 511 B/s rd, 0 op/s; 0 B/s, 0 objects/s recovering 2026-03-10T13:29:16.002 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:29:15 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:29:15] "GET /metrics HTTP/1.1" 200 37777 "" "Prometheus/2.51.0" 2026-03-10T13:29:17.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:16 vm00 ceph-mon[94470]: pgmap v96: 161 pgs: 161 active+clean; 457 KiB data, 211 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s; 0 B/s, 0 objects/s recovering 2026-03-10T13:29:17.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:16 vm00 ceph-mon[96293]: pgmap v96: 161 pgs: 161 active+clean; 457 KiB data, 211 MiB used, 160 GiB / 160 GiB avail; 
1.1 KiB/s rd, 1 op/s; 0 B/s, 0 objects/s recovering 2026-03-10T13:29:17.252 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:29:17 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:29:17.001Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:29:17.253 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:29:17 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:29:17.002Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:29:17.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:16 vm08 ceph-mon[82639]: pgmap v96: 161 pgs: 161 active+clean; 457 KiB data, 211 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s; 0 B/s, 0 objects/s recovering 2026-03-10T13:29:18.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:18 vm00 ceph-mon[94470]: pgmap v97: 161 pgs: 161 active+clean; 457 KiB data, 211 MiB used, 160 GiB / 160 GiB avail; 982 B/s rd, 0 op/s; 0 B/s, 0 objects/s recovering 2026-03-10T13:29:18.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:18 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:18.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:18 vm00 ceph-mon[96293]: pgmap v97: 161 pgs: 161 active+clean; 457 KiB data, 211 MiB used, 160 GiB / 160 GiB avail; 982 B/s rd, 0 op/s; 0 B/s, 0 objects/s recovering 2026-03-10T13:29:18.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:18 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:18.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:18 vm08 ceph-mon[82639]: pgmap v97: 161 pgs: 161 active+clean; 457 KiB data, 211 MiB used, 160 GiB / 160 GiB avail; 982 B/s rd, 0 op/s; 0 B/s, 0 objects/s recovering 2026-03-10T13:29:18.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:18 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:20.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:20 vm00 ceph-mon[94470]: pgmap v98: 161 pgs: 161 active+clean; 457 KiB data, 211 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s; 0 B/s, 0 objects/s recovering 2026-03-10T13:29:20.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:20 vm00 ceph-mon[96293]: pgmap v98: 161 pgs: 161 active+clean; 457 KiB data, 211 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s; 0 B/s, 0 objects/s recovering 2026-03-10T13:29:20.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:20 vm08 ceph-mon[82639]: pgmap v98: 161 pgs: 161 active+clean; 457 KiB data, 211 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s; 0 B/s, 0 objects/s recovering 2026-03-10T13:29:21.632 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:21 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["5"], "max": 16}]: dispatch 2026-03-10T13:29:21.632 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:21 vm08 ceph-mon[82639]: from='mon.2 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["5"], "max": 16}]: dispatch 2026-03-10T13:29:21.632 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:21 vm08 ceph-mon[82639]: Upgrade: osd.5 is safe to restart 2026-03-10T13:29:21.632 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:21 vm08 ceph-mon[82639]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:29:21.632 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:21 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:21.632 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:21 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.5"}]: dispatch 2026-03-10T13:29:21.632 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:21 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:29:21.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:21 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["5"], "max": 16}]: dispatch 2026-03-10T13:29:21.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:21 vm00 ceph-mon[94470]: from='mon.2 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["5"], "max": 16}]: dispatch 2026-03-10T13:29:21.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:21 vm00 ceph-mon[94470]: Upgrade: osd.5 is safe to restart 2026-03-10T13:29:21.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:21 vm00 ceph-mon[94470]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:29:21.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:21 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:21.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:21 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.5"}]: dispatch 2026-03-10T13:29:21.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:21 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:29:21.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:21 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["5"], "max": 16}]: dispatch 2026-03-10T13:29:21.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:21 vm00 ceph-mon[96293]: from='mon.2 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["5"], "max": 16}]: dispatch 2026-03-10T13:29:21.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:21 vm00 ceph-mon[96293]: Upgrade: osd.5 is safe to restart 2026-03-10T13:29:21.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:21 vm00 ceph-mon[96293]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:29:21.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:21 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:21.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:21 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.5"}]: dispatch 2026-03-10T13:29:21.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:21 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:29:22.271 INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 10 13:29:21 vm08 systemd[1]: Stopping Ceph osd.5 for 98a3dada-1c81-11f1-89c9-d57c120f78d5... 2026-03-10T13:29:22.271 INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 10 13:29:22 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-5[55920]: 2026-03-10T13:29:22.050+0000 7f1e3b7fa700 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.5 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-10T13:29:22.271 INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 10 13:29:22 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-5[55920]: 2026-03-10T13:29:22.050+0000 7f1e3b7fa700 -1 osd.5 119 *** Got signal Terminated *** 2026-03-10T13:29:22.271 INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 10 13:29:22 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-5[55920]: 2026-03-10T13:29:22.050+0000 7f1e3b7fa700 -1 osd.5 119 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-10T13:29:22.687 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:22 vm08 ceph-mon[82639]: Upgrade: Updating osd.5 2026-03-10T13:29:22.687 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:22 vm08 ceph-mon[82639]: Deploying daemon osd.5 on vm08 2026-03-10T13:29:22.687 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:22 vm08 ceph-mon[82639]: pgmap v99: 161 pgs: 161 active+clean; 457 KiB data, 211 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s; 0 B/s, 0 objects/s recovering 2026-03-10T13:29:22.687 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:22 vm08 ceph-mon[82639]: osd.5 marked itself down and dead 2026-03-10T13:29:22.687 INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 10 13:29:22 vm08 podman[94145]: 2026-03-10 13:29:22.472622097 +0000 UTC m=+0.435737756 container died 9a6a2a67cc999e3bd61808cc0a684e6665726c9d07805a3e647dbfa3c38b043e (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-5, version=8, com.redhat.component=centos-stream-container, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, architecture=x86_64, io.openshift.expose-services=, GIT_BRANCH=HEAD, io.k8s.display-name=CentOS Stream 8, vcs-type=git, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, release=754, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, GIT_REPO=https://github.com/ceph/ceph-container.git, io.k8s.description=The Universal Base Image is designed 
and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, GIT_CLEAN=True, RELEASE=HEAD, io.openshift.tags=base centos centos-stream, vendor=Red Hat, Inc., build-date=2022-05-03T08:36:31.336870, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, maintainer=Guillaume Abrioux , ceph=True, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, CEPH_POINT_RELEASE=-17.2.0, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, io.buildah.version=1.19.8, name=centos-stream, distribution-scope=public) 2026-03-10T13:29:22.687 INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 10 13:29:22 vm08 podman[94145]: 2026-03-10 13:29:22.507330419 +0000 UTC m=+0.470446078 container remove 9a6a2a67cc999e3bd61808cc0a684e6665726c9d07805a3e647dbfa3c38b043e (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-5, architecture=x86_64, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, io.openshift.expose-services=, distribution-scope=public, GIT_BRANCH=HEAD, GIT_CLEAN=True, RELEASE=HEAD, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., release=754, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, GIT_REPO=https://github.com/ceph/ceph-container.git, vcs-type=git, com.redhat.component=centos-stream-container, version=8, build-date=2022-05-03T08:36:31.336870, io.buildah.version=1.19.8, name=centos-stream, io.k8s.display-name=CentOS Stream 8, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, ceph=True, io.openshift.tags=base centos centos-stream, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, maintainer=Guillaume Abrioux , CEPH_POINT_RELEASE=-17.2.0, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, vendor=Red Hat, Inc.) 
2026-03-10T13:29:22.687 INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 10 13:29:22 vm08 bash[94145]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-5 2026-03-10T13:29:22.687 INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 10 13:29:22 vm08 podman[94211]: 2026-03-10 13:29:22.659506252 +0000 UTC m=+0.015922867 container create 157db0a6dc0141e2c36cd0ad9da15d5cba3fc8a83bbddd9ef65043e023fdcaf8 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-5-deactivate, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, ceph=True, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, OSD_FLAVOR=default, org.label-schema.build-date=20260223, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-10T13:29:22.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:22 vm00 ceph-mon[94470]: Upgrade: Updating osd.5 2026-03-10T13:29:22.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:22 vm00 ceph-mon[94470]: Deploying daemon osd.5 on vm08 2026-03-10T13:29:22.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:22 vm00 ceph-mon[94470]: pgmap v99: 161 pgs: 161 active+clean; 457 KiB data, 211 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s; 0 B/s, 0 objects/s recovering 2026-03-10T13:29:22.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:22 vm00 ceph-mon[94470]: osd.5 marked itself down and dead 2026-03-10T13:29:22.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:22 vm00 ceph-mon[96293]: Upgrade: Updating osd.5 2026-03-10T13:29:22.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:22 vm00 ceph-mon[96293]: Deploying daemon osd.5 on vm08 2026-03-10T13:29:22.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:22 vm00 ceph-mon[96293]: pgmap v99: 161 pgs: 161 active+clean; 457 KiB data, 211 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s; 0 B/s, 0 objects/s recovering 2026-03-10T13:29:22.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:22 vm00 ceph-mon[96293]: osd.5 marked itself down and dead 2026-03-10T13:29:22.965 INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 10 13:29:22 vm08 podman[94211]: 2026-03-10 13:29:22.707039933 +0000 UTC m=+0.063456539 container init 157db0a6dc0141e2c36cd0ad9da15d5cba3fc8a83bbddd9ef65043e023fdcaf8 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-5-deactivate, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, OSD_FLAVOR=default, org.label-schema.build-date=20260223, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid) 2026-03-10T13:29:22.965 INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 10 13:29:22 vm08 podman[94211]: 2026-03-10 
13:29:22.710031152 +0000 UTC m=+0.066447767 container start 157db0a6dc0141e2c36cd0ad9da15d5cba3fc8a83bbddd9ef65043e023fdcaf8 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-5-deactivate, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team , ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid) 2026-03-10T13:29:22.965 INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 10 13:29:22 vm08 podman[94211]: 2026-03-10 13:29:22.711410785 +0000 UTC m=+0.067827390 container attach 157db0a6dc0141e2c36cd0ad9da15d5cba3fc8a83bbddd9ef65043e023fdcaf8 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-5-deactivate, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, org.label-schema.build-date=20260223, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, CEPH_REF=squid, ceph=True, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9) 2026-03-10T13:29:22.965 INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 10 13:29:22 vm08 podman[94211]: 2026-03-10 13:29:22.652760465 +0000 UTC m=+0.009177091 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:29:22.965 INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 10 13:29:22 vm08 podman[94211]: 2026-03-10 13:29:22.835502597 +0000 UTC m=+0.191919212 container died 157db0a6dc0141e2c36cd0ad9da15d5cba3fc8a83bbddd9ef65043e023fdcaf8 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-5-deactivate, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, CEPH_REF=squid, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/) 2026-03-10T13:29:22.965 INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 10 13:29:22 vm08 podman[94211]: 2026-03-10 13:29:22.857080811 +0000 UTC m=+0.213497426 container remove 157db0a6dc0141e2c36cd0ad9da15d5cba3fc8a83bbddd9ef65043e023fdcaf8 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, 
name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-5-deactivate, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20260223, CEPH_REF=squid, OSD_FLAVOR=default, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/) 2026-03-10T13:29:22.965 INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 10 13:29:22 vm08 systemd[1]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@osd.5.service: Deactivated successfully. 2026-03-10T13:29:22.965 INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 10 13:29:22 vm08 systemd[1]: Stopped Ceph osd.5 for 98a3dada-1c81-11f1-89c9-d57c120f78d5. 2026-03-10T13:29:22.965 INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 10 13:29:22 vm08 systemd[1]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@osd.5.service: Consumed 26.794s CPU time. 2026-03-10T13:29:23.424 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:23 vm08 ceph-mon[82639]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-10T13:29:23.424 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:23 vm08 ceph-mon[82639]: osdmap e120: 8 total, 7 up, 8 in 2026-03-10T13:29:23.424 INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 10 13:29:23 vm08 systemd[1]: Starting Ceph osd.5 for 98a3dada-1c81-11f1-89c9-d57c120f78d5... 2026-03-10T13:29:23.424 INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 10 13:29:23 vm08 podman[94316]: 2026-03-10 13:29:23.171910462 +0000 UTC m=+0.018433487 container create ea9473723202f8596ca9c75d8e39a22e5af1183512cfe5b600705a81e298f1da (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-5-activate, CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20260223) 2026-03-10T13:29:23.424 INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 10 13:29:23 vm08 podman[94316]: 2026-03-10 13:29:23.213852606 +0000 UTC m=+0.060375640 container init ea9473723202f8596ca9c75d8e39a22e5af1183512cfe5b600705a81e298f1da (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-5-activate, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20260223, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, 
org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2) 2026-03-10T13:29:23.424 INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 10 13:29:23 vm08 podman[94316]: 2026-03-10 13:29:23.216568079 +0000 UTC m=+0.063091113 container start ea9473723202f8596ca9c75d8e39a22e5af1183512cfe5b600705a81e298f1da (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-5-activate, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.license=GPLv2, ceph=True, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20260223, FROM_IMAGE=quay.io/centos/centos:stream9) 2026-03-10T13:29:23.424 INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 10 13:29:23 vm08 podman[94316]: 2026-03-10 13:29:23.217538025 +0000 UTC m=+0.064061059 container attach ea9473723202f8596ca9c75d8e39a22e5af1183512cfe5b600705a81e298f1da (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-5-activate, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, ceph=True, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-10T13:29:23.424 INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 10 13:29:23 vm08 podman[94316]: 2026-03-10 13:29:23.164906522 +0000 UTC m=+0.011429556 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:29:23.424 INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 10 13:29:23 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-5-activate[94327]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T13:29:23.424 INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 10 13:29:23 vm08 bash[94316]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T13:29:23.424 INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 10 13:29:23 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-5-activate[94327]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T13:29:23.424 INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 10 13:29:23 vm08 bash[94316]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T13:29:23.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:23 vm00 ceph-mon[94470]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-10T13:29:23.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:23 vm00 ceph-mon[94470]: osdmap e120: 8 total, 7 up, 8 in 2026-03-10T13:29:23.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:23 vm00 ceph-mon[96293]: Health check failed: 1 osds 
down (OSD_DOWN) 2026-03-10T13:29:23.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:23 vm00 ceph-mon[96293]: osdmap e120: 8 total, 7 up, 8 in 2026-03-10T13:29:24.191 INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 10 13:29:23 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-5-activate[94327]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-10T13:29:24.191 INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 10 13:29:23 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-5-activate[94327]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T13:29:24.191 INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 10 13:29:23 vm08 bash[94316]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-10T13:29:24.191 INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 10 13:29:23 vm08 bash[94316]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T13:29:24.191 INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 10 13:29:23 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-5-activate[94327]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T13:29:24.191 INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 10 13:29:23 vm08 bash[94316]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T13:29:24.191 INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 10 13:29:23 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-5-activate[94327]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-5 2026-03-10T13:29:24.191 INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 10 13:29:23 vm08 bash[94316]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-5 2026-03-10T13:29:24.191 INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 10 13:29:23 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-5-activate[94327]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-a6380a16-b1f3-4140-88d0-eddea9cff551/osd-block-4b07141b-58eb-441e-a2a5-b6422715a810 --path /var/lib/ceph/osd/ceph-5 --no-mon-config 2026-03-10T13:29:24.191 INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 10 13:29:23 vm08 bash[94316]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-a6380a16-b1f3-4140-88d0-eddea9cff551/osd-block-4b07141b-58eb-441e-a2a5-b6422715a810 --path /var/lib/ceph/osd/ceph-5 --no-mon-config 2026-03-10T13:29:24.435 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:29:24 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:29:24.148Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=1 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:29:24.441 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:24 vm08 ceph-mon[82639]: pgmap v101: 161 pgs: 22 stale+active+clean, 139 active+clean; 457 KiB data, 211 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-10T13:29:24.441 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:24 vm08 ceph-mon[82639]: osdmap e121: 8 total, 7 up, 8 in 2026-03-10T13:29:24.442 INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 10 13:29:24 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-5-activate[94327]: Running command: /usr/bin/ln -snf /dev/ceph-a6380a16-b1f3-4140-88d0-eddea9cff551/osd-block-4b07141b-58eb-441e-a2a5-b6422715a810 /var/lib/ceph/osd/ceph-5/block 2026-03-10T13:29:24.442 
INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 10 13:29:24 vm08 bash[94316]: Running command: /usr/bin/ln -snf /dev/ceph-a6380a16-b1f3-4140-88d0-eddea9cff551/osd-block-4b07141b-58eb-441e-a2a5-b6422715a810 /var/lib/ceph/osd/ceph-5/block 2026-03-10T13:29:24.442 INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 10 13:29:24 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-5-activate[94327]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-5/block 2026-03-10T13:29:24.442 INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 10 13:29:24 vm08 bash[94316]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-5/block 2026-03-10T13:29:24.442 INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 10 13:29:24 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-5-activate[94327]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-1 2026-03-10T13:29:24.442 INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 10 13:29:24 vm08 bash[94316]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-1 2026-03-10T13:29:24.442 INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 10 13:29:24 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-5-activate[94327]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-5 2026-03-10T13:29:24.442 INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 10 13:29:24 vm08 bash[94316]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-5 2026-03-10T13:29:24.442 INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 10 13:29:24 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-5-activate[94327]: --> ceph-volume lvm activate successful for osd ID: 5 2026-03-10T13:29:24.442 INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 10 13:29:24 vm08 bash[94316]: --> ceph-volume lvm activate successful for osd ID: 5 2026-03-10T13:29:24.442 INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 10 13:29:24 vm08 conmon[94327]: conmon ea9473723202f8596ca9 : Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-ea9473723202f8596ca9c75d8e39a22e5af1183512cfe5b600705a81e298f1da.scope/container/memory.events 2026-03-10T13:29:24.442 INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 10 13:29:24 vm08 podman[94527]: 2026-03-10 13:29:24.250435807 +0000 UTC m=+0.017225717 container died ea9473723202f8596ca9c75d8e39a22e5af1183512cfe5b600705a81e298f1da (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-5-activate, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, org.label-schema.build-date=20260223) 2026-03-10T13:29:24.442 INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 10 13:29:24 vm08 podman[94527]: 2026-03-10 13:29:24.267615415 +0000 UTC m=+0.034405316 container remove ea9473723202f8596ca9c75d8e39a22e5af1183512cfe5b600705a81e298f1da (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-5-activate, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, 
GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0) 2026-03-10T13:29:24.442 INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 10 13:29:24 vm08 podman[94568]: 2026-03-10 13:29:24.390459572 +0000 UTC m=+0.019987036 container create e197b6bd6561a9ff689e393f799509056386b4773f7e123f07d0df412c8b836b (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-5, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2) 2026-03-10T13:29:24.442 INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 10 13:29:24 vm08 podman[94568]: 2026-03-10 13:29:24.432523032 +0000 UTC m=+0.062050507 container init e197b6bd6561a9ff689e393f799509056386b4773f7e123f07d0df412c8b836b (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-5, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9) 2026-03-10T13:29:24.442 INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 10 13:29:24 vm08 podman[94568]: 2026-03-10 13:29:24.439553874 +0000 UTC m=+0.069081338 container start e197b6bd6561a9ff689e393f799509056386b4773f7e123f07d0df412c8b836b (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-5, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.label-schema.build-date=20260223, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-10T13:29:24.752 
INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:24 vm00 ceph-mon[94470]: pgmap v101: 161 pgs: 22 stale+active+clean, 139 active+clean; 457 KiB data, 211 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-10T13:29:24.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:24 vm00 ceph-mon[94470]: osdmap e121: 8 total, 7 up, 8 in 2026-03-10T13:29:24.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:24 vm00 ceph-mon[96293]: pgmap v101: 161 pgs: 22 stale+active+clean, 139 active+clean; 457 KiB data, 211 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-10T13:29:24.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:24 vm00 ceph-mon[96293]: osdmap e121: 8 total, 7 up, 8 in 2026-03-10T13:29:24.770 INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 10 13:29:24 vm08 bash[94568]: e197b6bd6561a9ff689e393f799509056386b4773f7e123f07d0df412c8b836b 2026-03-10T13:29:24.770 INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 10 13:29:24 vm08 podman[94568]: 2026-03-10 13:29:24.383108652 +0000 UTC m=+0.012636126 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:29:24.770 INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 10 13:29:24 vm08 systemd[1]: Started Ceph osd.5 for 98a3dada-1c81-11f1-89c9-d57c120f78d5. 2026-03-10T13:29:25.415 INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 10 13:29:25 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-5[94578]: 2026-03-10T13:29:25.264+0000 7fcdf410d740 -1 Falling back to public interface 2026-03-10T13:29:25.682 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:25 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:25.682 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:25 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:25.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:25 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:25.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:25 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:25.752 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:29:25 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:29:25] "GET /metrics HTTP/1.1" 200 37777 "" "Prometheus/2.51.0" 2026-03-10T13:29:25.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:25 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:25.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:25 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:26.485 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:26 vm08 ceph-mon[82639]: pgmap v103: 161 pgs: 25 active+undersized, 15 peering, 1 stale+active+clean, 14 active+undersized+degraded, 106 active+clean; 457 KiB data, 211 MiB used, 160 GiB / 160 GiB avail; 55/627 objects degraded (8.772%) 2026-03-10T13:29:26.486 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:26 vm08 ceph-mon[82639]: Health check failed: Reduced data availability: 2 pgs inactive, 5 pgs peering (PG_AVAILABILITY) 2026-03-10T13:29:26.486 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:26 vm08 ceph-mon[82639]: Health check failed: Degraded data redundancy: 55/627 objects degraded (8.772%), 14 pgs degraded (PG_DEGRADED) 2026-03-10T13:29:26.486 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:26 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:26.486 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:26 vm08 
ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:26.486 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:26 vm08 ceph-mon[82639]: from='osd.5 [v2:192.168.123.108:6808/2060163786,v1:192.168.123.108:6809/2060163786]' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch 2026-03-10T13:29:26.486 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:26 vm08 ceph-mon[82639]: from='osd.5 ' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch 2026-03-10T13:29:26.486 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:26 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:26.486 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:26 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:26.486 INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 10 13:29:26 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-5[94578]: 2026-03-10T13:29:26.132+0000 7fcdf410d740 -1 osd.5 0 read_superblock omap replica is missing. 2026-03-10T13:29:26.486 INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 10 13:29:26 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-5[94578]: 2026-03-10T13:29:26.163+0000 7fcdf410d740 -1 osd.5 119 log_to_monitors true 2026-03-10T13:29:26.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:26 vm00 ceph-mon[94470]: pgmap v103: 161 pgs: 25 active+undersized, 15 peering, 1 stale+active+clean, 14 active+undersized+degraded, 106 active+clean; 457 KiB data, 211 MiB used, 160 GiB / 160 GiB avail; 55/627 objects degraded (8.772%) 2026-03-10T13:29:26.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:26 vm00 ceph-mon[94470]: Health check failed: Reduced data availability: 2 pgs inactive, 5 pgs peering (PG_AVAILABILITY) 2026-03-10T13:29:26.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:26 vm00 ceph-mon[94470]: Health check failed: Degraded data redundancy: 55/627 objects degraded (8.772%), 14 pgs degraded (PG_DEGRADED) 2026-03-10T13:29:26.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:26 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:26.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:26 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:26.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:26 vm00 ceph-mon[94470]: from='osd.5 [v2:192.168.123.108:6808/2060163786,v1:192.168.123.108:6809/2060163786]' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch 2026-03-10T13:29:26.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:26 vm00 ceph-mon[94470]: from='osd.5 ' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch 2026-03-10T13:29:26.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:26 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:26.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:26 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:26.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:26 vm00 ceph-mon[96293]: pgmap v103: 161 pgs: 25 active+undersized, 15 peering, 1 stale+active+clean, 14 active+undersized+degraded, 106 active+clean; 457 KiB data, 211 MiB used, 160 GiB / 160 GiB avail; 55/627 objects degraded (8.772%) 2026-03-10T13:29:26.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:26 vm00 ceph-mon[96293]: Health check failed: Reduced data availability: 2 pgs inactive, 5 pgs peering (PG_AVAILABILITY) 
2026-03-10T13:29:26.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:26 vm00 ceph-mon[96293]: Health check failed: Degraded data redundancy: 55/627 objects degraded (8.772%), 14 pgs degraded (PG_DEGRADED) 2026-03-10T13:29:26.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:26 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:26.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:26 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:26.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:26 vm00 ceph-mon[96293]: from='osd.5 [v2:192.168.123.108:6808/2060163786,v1:192.168.123.108:6809/2060163786]' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch 2026-03-10T13:29:26.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:26 vm00 ceph-mon[96293]: from='osd.5 ' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch 2026-03-10T13:29:26.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:26 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:26.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:26 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:27.021 INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 10 13:29:26 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-5[94578]: 2026-03-10T13:29:26.970+0000 7fcdebeb8640 -1 osd.5 119 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-10T13:29:27.503 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:29:27 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:29:27.002Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:29:27.503 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:29:27 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:29:27.002Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:29:27.944 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:29:27 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:29:27.511+0000 7f1445d54640 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (19 PGs are or would become offline) 2026-03-10T13:29:28.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:27 vm00 ceph-mon[94470]: from='osd.5 ' entity='osd.5' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]': finished 2026-03-10T13:29:28.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:27 vm00 ceph-mon[94470]: osdmap e122: 8 total, 7 up, 8 in 2026-03-10T13:29:28.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:27 vm00 ceph-mon[94470]: from='osd.5 [v2:192.168.123.108:6808/2060163786,v1:192.168.123.108:6809/2060163786]' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm08", "root=default"]}]: dispatch 
2026-03-10T13:29:28.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:27 vm00 ceph-mon[94470]: from='osd.5 ' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm08", "root=default"]}]: dispatch 2026-03-10T13:29:28.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:27 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:28.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:27 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:29:28.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:27 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:28.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:27 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:28.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:27 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:29:28.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:27 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:29:28.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:27 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:28.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:27 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:29:28.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:27 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:29:28.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:27 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:29:28.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:27 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:29:28.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:27 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["6"], "max": 16}]: dispatch 2026-03-10T13:29:28.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:27 vm00 ceph-mon[96293]: from='osd.5 ' entity='osd.5' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]': finished 2026-03-10T13:29:28.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:27 vm00 ceph-mon[96293]: osdmap e122: 8 total, 7 up, 8 in 2026-03-10T13:29:28.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:27 vm00 ceph-mon[96293]: from='osd.5 [v2:192.168.123.108:6808/2060163786,v1:192.168.123.108:6809/2060163786]' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm08", "root=default"]}]: dispatch 2026-03-10T13:29:28.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:27 vm00 ceph-mon[96293]: from='osd.5 ' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm08", "root=default"]}]: dispatch 2026-03-10T13:29:28.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:27 vm00 ceph-mon[96293]: 
from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:28.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:27 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:29:28.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:27 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:28.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:27 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:28.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:27 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:29:28.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:27 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:29:28.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:27 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:28.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:27 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:29:28.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:27 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:29:28.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:27 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:29:28.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:27 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:29:28.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:27 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["6"], "max": 16}]: dispatch 2026-03-10T13:29:28.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:27 vm08 ceph-mon[82639]: from='osd.5 ' entity='osd.5' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]': finished 2026-03-10T13:29:28.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:27 vm08 ceph-mon[82639]: osdmap e122: 8 total, 7 up, 8 in 2026-03-10T13:29:28.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:27 vm08 ceph-mon[82639]: from='osd.5 [v2:192.168.123.108:6808/2060163786,v1:192.168.123.108:6809/2060163786]' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm08", "root=default"]}]: dispatch 2026-03-10T13:29:28.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:27 vm08 ceph-mon[82639]: from='osd.5 ' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm08", "root=default"]}]: dispatch 2026-03-10T13:29:28.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:27 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:28.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:27 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:29:28.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:27 vm08 ceph-mon[82639]: 
from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:28.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:27 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:28.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:27 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:29:28.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:27 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:29:28.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:27 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:28.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:27 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:29:28.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:27 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:29:28.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:27 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:29:28.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:27 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:29:28.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:27 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["6"], "max": 16}]: dispatch 2026-03-10T13:29:29.231 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:28 vm08 ceph-mon[82639]: pgmap v105: 161 pgs: 30 active+undersized, 15 peering, 14 active+undersized+degraded, 102 active+clean; 457 KiB data, 229 MiB used, 160 GiB / 160 GiB avail; 55/627 objects degraded (8.772%) 2026-03-10T13:29:29.232 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:28 vm08 ceph-mon[82639]: from='mon.2 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["6"], "max": 16}]: dispatch 2026-03-10T13:29:29.232 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:28 vm08 ceph-mon[82639]: Upgrade: unsafe to stop osd(s) at this time (19 PGs are or would become offline) 2026-03-10T13:29:29.232 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:28 vm08 ceph-mon[82639]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-10T13:29:29.232 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:28 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T13:29:29.232 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:28 vm08 ceph-mon[82639]: osd.5 [v2:192.168.123.108:6808/2060163786,v1:192.168.123.108:6809/2060163786] boot 2026-03-10T13:29:29.232 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:28 vm08 ceph-mon[82639]: osdmap e123: 8 total, 8 up, 8 in 2026-03-10T13:29:29.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:28 vm00 ceph-mon[94470]: pgmap v105: 161 pgs: 30 active+undersized, 15 peering, 14 active+undersized+degraded, 102 active+clean; 457 KiB data, 229 MiB used, 160 GiB / 160 GiB avail; 55/627 objects degraded (8.772%) 2026-03-10T13:29:29.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:28 vm00 ceph-mon[94470]: from='mon.2 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["6"], "max": 16}]: dispatch 2026-03-10T13:29:29.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:28 vm00 ceph-mon[94470]: Upgrade: unsafe to stop osd(s) at this time (19 PGs are or would become offline) 2026-03-10T13:29:29.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:28 vm00 ceph-mon[94470]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-10T13:29:29.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:28 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T13:29:29.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:28 vm00 ceph-mon[94470]: osd.5 [v2:192.168.123.108:6808/2060163786,v1:192.168.123.108:6809/2060163786] boot 2026-03-10T13:29:29.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:28 vm00 ceph-mon[94470]: osdmap e123: 8 total, 8 up, 8 in 2026-03-10T13:29:29.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:28 vm00 ceph-mon[96293]: pgmap v105: 161 pgs: 30 active+undersized, 15 peering, 14 active+undersized+degraded, 102 active+clean; 457 KiB data, 229 MiB used, 160 GiB / 160 GiB avail; 55/627 objects degraded (8.772%) 2026-03-10T13:29:29.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:28 vm00 ceph-mon[96293]: from='mon.2 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["6"], "max": 16}]: dispatch 2026-03-10T13:29:29.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:28 vm00 ceph-mon[96293]: Upgrade: unsafe to stop osd(s) at this time (19 PGs are or would become offline) 2026-03-10T13:29:29.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:28 vm00 ceph-mon[96293]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-10T13:29:29.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:28 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T13:29:29.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:28 vm00 ceph-mon[96293]: osd.5 [v2:192.168.123.108:6808/2060163786,v1:192.168.123.108:6809/2060163786] boot 2026-03-10T13:29:29.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:28 vm00 ceph-mon[96293]: osdmap e123: 8 total, 8 up, 8 in 2026-03-10T13:29:30.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:29 vm00 ceph-mon[94470]: osdmap e124: 8 total, 8 up, 8 in 2026-03-10T13:29:30.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:29 vm00 ceph-mon[96293]: osdmap e124: 8 total, 8 up, 8 in 2026-03-10T13:29:30.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:29 vm08 ceph-mon[82639]: osdmap e124: 8 total, 8 up, 8 in 2026-03-10T13:29:31.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:30 vm00 ceph-mon[94470]: pgmap v108: 161 pgs: 28 active+undersized, 15 peering, 11 active+undersized+degraded, 107 active+clean; 457 KiB data, 230 MiB used, 160 GiB / 160 GiB avail; 46/627 objects degraded (7.337%) 2026-03-10T13:29:31.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:30 vm00 ceph-mon[96293]: pgmap v108: 161 pgs: 28 active+undersized, 15 peering, 11 active+undersized+degraded, 107 active+clean; 457 KiB data, 230 MiB used, 160 GiB / 160 GiB avail; 46/627 objects degraded (7.337%) 2026-03-10T13:29:31.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:30 vm08 ceph-mon[82639]: pgmap v108: 161 pgs: 28 active+undersized, 15 peering, 11 active+undersized+degraded, 107 active+clean; 457 KiB data, 230 MiB used, 160 GiB / 160 GiB avail; 46/627 objects degraded (7.337%) 2026-03-10T13:29:32.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:32 vm00 ceph-mon[94470]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:29:32.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:32 vm00 ceph-mon[96293]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:29:32.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:32 vm08 ceph-mon[82639]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:29:33.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:33 vm00 ceph-mon[94470]: pgmap v109: 161 pgs: 11 active+undersized, 15 peering, 6 active+undersized+degraded, 129 active+clean; 457 KiB data, 230 MiB used, 160 GiB / 160 GiB avail; 682 B/s rd, 0 op/s; 28/627 objects degraded (4.466%) 2026-03-10T13:29:33.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:33 vm00 ceph-mon[94470]: Health check update: Degraded data redundancy: 28/627 objects degraded (4.466%), 6 pgs degraded (PG_DEGRADED) 2026-03-10T13:29:33.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:33 vm00 ceph-mon[96293]: pgmap v109: 161 pgs: 11 active+undersized, 15 
peering, 6 active+undersized+degraded, 129 active+clean; 457 KiB data, 230 MiB used, 160 GiB / 160 GiB avail; 682 B/s rd, 0 op/s; 28/627 objects degraded (4.466%) 2026-03-10T13:29:33.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:33 vm00 ceph-mon[96293]: Health check update: Degraded data redundancy: 28/627 objects degraded (4.466%), 6 pgs degraded (PG_DEGRADED) 2026-03-10T13:29:33.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:33 vm08 ceph-mon[82639]: pgmap v109: 161 pgs: 11 active+undersized, 15 peering, 6 active+undersized+degraded, 129 active+clean; 457 KiB data, 230 MiB used, 160 GiB / 160 GiB avail; 682 B/s rd, 0 op/s; 28/627 objects degraded (4.466%) 2026-03-10T13:29:33.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:33 vm08 ceph-mon[82639]: Health check update: Degraded data redundancy: 28/627 objects degraded (4.466%), 6 pgs degraded (PG_DEGRADED) 2026-03-10T13:29:34.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:34 vm00 ceph-mon[94470]: pgmap v110: 161 pgs: 10 peering, 151 active+clean; 457 KiB data, 230 MiB used, 160 GiB / 160 GiB avail; 642 B/s rd, 0 op/s 2026-03-10T13:29:34.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:34 vm00 ceph-mon[96293]: pgmap v110: 161 pgs: 10 peering, 151 active+clean; 457 KiB data, 230 MiB used, 160 GiB / 160 GiB avail; 642 B/s rd, 0 op/s 2026-03-10T13:29:34.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:34 vm08 ceph-mon[82639]: pgmap v110: 161 pgs: 10 peering, 151 active+clean; 457 KiB data, 230 MiB used, 160 GiB / 160 GiB avail; 642 B/s rd, 0 op/s 2026-03-10T13:29:35.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:35 vm00 ceph-mon[94470]: Health check cleared: PG_AVAILABILITY (was: Reduced data availability: 2 pgs inactive, 5 pgs peering) 2026-03-10T13:29:35.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:35 vm00 ceph-mon[94470]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 28/627 objects degraded (4.466%), 6 pgs degraded) 2026-03-10T13:29:35.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:35 vm00 ceph-mon[94470]: Cluster is now healthy 2026-03-10T13:29:35.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:35 vm00 ceph-mon[96293]: Health check cleared: PG_AVAILABILITY (was: Reduced data availability: 2 pgs inactive, 5 pgs peering) 2026-03-10T13:29:35.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:35 vm00 ceph-mon[96293]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 28/627 objects degraded (4.466%), 6 pgs degraded) 2026-03-10T13:29:35.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:35 vm00 ceph-mon[96293]: Cluster is now healthy 2026-03-10T13:29:35.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:35 vm08 ceph-mon[82639]: Health check cleared: PG_AVAILABILITY (was: Reduced data availability: 2 pgs inactive, 5 pgs peering) 2026-03-10T13:29:35.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:35 vm08 ceph-mon[82639]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 28/627 objects degraded (4.466%), 6 pgs degraded) 2026-03-10T13:29:35.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:35 vm08 ceph-mon[82639]: Cluster is now healthy 2026-03-10T13:29:36.002 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:29:35 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:29:35] "GET /metrics HTTP/1.1" 200 37699 "" "Prometheus/2.51.0" 2026-03-10T13:29:36.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:36 vm00 ceph-mon[94470]: pgmap 
v111: 161 pgs: 161 active+clean; 457 KiB data, 230 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-10T13:29:36.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:36 vm00 ceph-mon[96293]: pgmap v111: 161 pgs: 161 active+clean; 457 KiB data, 230 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-10T13:29:36.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:36 vm08 ceph-mon[82639]: pgmap v111: 161 pgs: 161 active+clean; 457 KiB data, 230 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-10T13:29:37.502 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:29:37 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:29:37.003Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:29:37.503 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:29:37 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:29:37.004Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:29:38.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:38 vm00 ceph-mon[94470]: pgmap v112: 161 pgs: 161 active+clean; 457 KiB data, 230 MiB used, 160 GiB / 160 GiB avail; 983 B/s rd, 0 op/s 2026-03-10T13:29:38.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:38 vm00 ceph-mon[96293]: pgmap v112: 161 pgs: 161 active+clean; 457 KiB data, 230 MiB used, 160 GiB / 160 GiB avail; 983 B/s rd, 0 op/s 2026-03-10T13:29:38.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:38 vm08 ceph-mon[82639]: pgmap v112: 161 pgs: 161 active+clean; 457 KiB data, 230 MiB used, 160 GiB / 160 GiB avail; 983 B/s rd, 0 op/s 2026-03-10T13:29:40.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:40 vm00 ceph-mon[94470]: pgmap v113: 161 pgs: 161 active+clean; 457 KiB data, 230 MiB used, 160 GiB / 160 GiB avail; 889 B/s rd, 0 op/s 2026-03-10T13:29:40.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:40 vm00 ceph-mon[96293]: pgmap v113: 161 pgs: 161 active+clean; 457 KiB data, 230 MiB used, 160 GiB / 160 GiB avail; 889 B/s rd, 0 op/s 2026-03-10T13:29:40.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:40 vm08 ceph-mon[82639]: pgmap v113: 161 pgs: 161 active+clean; 457 KiB data, 230 MiB used, 160 GiB / 160 GiB avail; 889 B/s rd, 0 op/s 2026-03-10T13:29:41.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:41 vm00 ceph-mon[94470]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:29:41.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:41 vm00 ceph-mon[96293]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:29:41.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:41 vm08 ceph-mon[82639]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:29:41.826 INFO:teuthology.orchestra.run.vm00.stdout:true 
2026-03-10T13:29:42.250 INFO:teuthology.orchestra.run.vm00.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T13:29:42.250 INFO:teuthology.orchestra.run.vm00.stdout:alertmanager.a vm00 *:9093,9094 running (16m) 58s ago 22m 26.4M - 0.25.0 c8568f914cd2 12fde3cf83cb 2026-03-10T13:29:42.250 INFO:teuthology.orchestra.run.vm00.stdout:grafana.a vm08 *:3000 running (3m) 16s ago 22m 87.0M - 10.4.0 c8b91775d855 960e32589e98 2026-03-10T13:29:42.250 INFO:teuthology.orchestra.run.vm00.stdout:iscsi.foo.vm00.dezodo vm00 running (3m) 58s ago 22m 49.4M - 3.5 e1d6a67b021e 630bf6d4e7f3 2026-03-10T13:29:42.250 INFO:teuthology.orchestra.run.vm00.stdout:mgr.x vm08 *:8443,9283,8765 running (3m) 16s ago 24m 488M - 19.2.3-678-ge911bdeb 654f31e6858e 31b91eebc856 2026-03-10T13:29:42.250 INFO:teuthology.orchestra.run.vm00.stdout:mgr.y vm00 *:8443,9283,8765 running (10m) 58s ago 25m 555M - 19.2.3-678-ge911bdeb 654f31e6858e 5bc576d4d32b 2026-03-10T13:29:42.250 INFO:teuthology.orchestra.run.vm00.stdout:mon.a vm00 running (2m) 58s ago 25m 49.3M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 981df6371890 2026-03-10T13:29:42.250 INFO:teuthology.orchestra.run.vm00.stdout:mon.b vm08 running (3m) 16s ago 24m 48.2M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 8cceb678a9ee 2026-03-10T13:29:42.250 INFO:teuthology.orchestra.run.vm00.stdout:mon.c vm00 running (2m) 58s ago 24m 40.0M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 43deda66dee3 2026-03-10T13:29:42.250 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.a vm00 *:9100 running (16m) 58s ago 22m 10.8M - 1.7.0 72c9c2088986 bcf883401619 2026-03-10T13:29:42.250 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.b vm08 *:9100 running (16m) 16s ago 22m 10.0M - 1.7.0 72c9c2088986 4ac83f03f818 2026-03-10T13:29:42.250 INFO:teuthology.orchestra.run.vm00.stdout:osd.0 vm00 running (2m) 58s ago 24m 46.9M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 5fc74f4d2179 2026-03-10T13:29:42.250 INFO:teuthology.orchestra.run.vm00.stdout:osd.1 vm00 running (2m) 58s ago 24m 68.7M 4096M 19.2.3-678-ge911bdeb 654f31e6858e dc65e199e9eb 2026-03-10T13:29:42.250 INFO:teuthology.orchestra.run.vm00.stdout:osd.2 vm00 running (94s) 58s ago 23m 66.7M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 706171e0f5c2 2026-03-10T13:29:42.250 INFO:teuthology.orchestra.run.vm00.stdout:osd.3 vm00 running (59s) 58s ago 23m 13.0M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 8739c77cf14d 2026-03-10T13:29:42.250 INFO:teuthology.orchestra.run.vm00.stdout:osd.4 vm08 running (38s) 16s ago 23m 70.6M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 5b92674798b7 2026-03-10T13:29:42.250 INFO:teuthology.orchestra.run.vm00.stdout:osd.5 vm08 running (17s) 16s ago 23m 12.7M 4096M 19.2.3-678-ge911bdeb 654f31e6858e e197b6bd6561 2026-03-10T13:29:42.250 INFO:teuthology.orchestra.run.vm00.stdout:osd.6 vm08 running (23m) 16s ago 23m 56.2M 4096M 17.2.0 e1d6a67b021e c27676916d52 2026-03-10T13:29:42.250 INFO:teuthology.orchestra.run.vm00.stdout:osd.7 vm08 running (23m) 16s ago 23m 58.7M 4096M 17.2.0 e1d6a67b021e bf67951990a5 2026-03-10T13:29:42.250 INFO:teuthology.orchestra.run.vm00.stdout:prometheus.a vm08 *:9095 running (3m) 16s ago 22m 48.4M - 2.51.0 1d3b7f56885b 3f9b2d0821c9 2026-03-10T13:29:42.250 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm00.tvlvzo vm00 *:8000 running (22m) 58s ago 22m 98.1M - 17.2.0 e1d6a67b021e be24eac16807 2026-03-10T13:29:42.250 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm08.ljayps vm08 *:8000 running (22m) 16s ago 22m 95.7M - 17.2.0 e1d6a67b021e 67b525427823 2026-03-10T13:29:42.503 
INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:42 vm00 ceph-mon[94470]: pgmap v114: 161 pgs: 161 active+clean; 457 KiB data, 230 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:29:42.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:42 vm00 ceph-mon[94470]: from='client.54271 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:29:42.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:42 vm00 ceph-mon[94470]: from='client.34358 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:29:42.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:42 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:42.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:42 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:29:42.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:42 vm00 ceph-mon[96293]: pgmap v114: 161 pgs: 161 active+clean; 457 KiB data, 230 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:29:42.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:42 vm00 ceph-mon[96293]: from='client.54271 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:29:42.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:42 vm00 ceph-mon[96293]: from='client.34358 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:29:42.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:42 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:42.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:42 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:29:42.522 INFO:teuthology.orchestra.run.vm00.stdout:{ 2026-03-10T13:29:42.522 INFO:teuthology.orchestra.run.vm00.stdout: "mon": { 2026-03-10T13:29:42.522 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 3 2026-03-10T13:29:42.522 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T13:29:42.522 INFO:teuthology.orchestra.run.vm00.stdout: "mgr": { 2026-03-10T13:29:42.522 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-10T13:29:42.522 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T13:29:42.522 INFO:teuthology.orchestra.run.vm00.stdout: "osd": { 2026-03-10T13:29:42.522 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 2, 2026-03-10T13:29:42.522 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 6 2026-03-10T13:29:42.522 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T13:29:42.522 INFO:teuthology.orchestra.run.vm00.stdout: "rgw": { 2026-03-10T13:29:42.522 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 2 2026-03-10T13:29:42.522 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T13:29:42.522 INFO:teuthology.orchestra.run.vm00.stdout: "overall": { 
2026-03-10T13:29:42.522 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 4, 2026-03-10T13:29:42.522 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 11 2026-03-10T13:29:42.522 INFO:teuthology.orchestra.run.vm00.stdout: } 2026-03-10T13:29:42.523 INFO:teuthology.orchestra.run.vm00.stdout:} 2026-03-10T13:29:42.724 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:42 vm08 ceph-mon[82639]: pgmap v114: 161 pgs: 161 active+clean; 457 KiB data, 230 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:29:42.724 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:42 vm08 ceph-mon[82639]: from='client.54271 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:29:42.724 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:42 vm08 ceph-mon[82639]: from='client.34358 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:29:42.724 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:42 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:42.724 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:42 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:29:42.739 INFO:teuthology.orchestra.run.vm00.stdout:{ 2026-03-10T13:29:42.739 INFO:teuthology.orchestra.run.vm00.stdout: "target_image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", 2026-03-10T13:29:42.739 INFO:teuthology.orchestra.run.vm00.stdout: "in_progress": true, 2026-03-10T13:29:42.739 INFO:teuthology.orchestra.run.vm00.stdout: "which": "Upgrading daemons of type(s) crash,osd", 2026-03-10T13:29:42.739 INFO:teuthology.orchestra.run.vm00.stdout: "services_complete": [], 2026-03-10T13:29:42.739 INFO:teuthology.orchestra.run.vm00.stdout: "progress": "6/8 daemons upgraded", 2026-03-10T13:29:42.739 INFO:teuthology.orchestra.run.vm00.stdout: "message": "Currently upgrading osd daemons", 2026-03-10T13:29:42.739 INFO:teuthology.orchestra.run.vm00.stdout: "is_paused": false 2026-03-10T13:29:42.739 INFO:teuthology.orchestra.run.vm00.stdout:} 2026-03-10T13:29:43.571 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:43 vm08 ceph-mon[82639]: from='client.54280 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:29:43.571 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:43 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:43.571 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:43 vm08 ceph-mon[82639]: from='client.? 192.168.123.100:0/1346728748' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:29:43.571 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:43 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["6"], "max": 16}]: dispatch 2026-03-10T13:29:43.571 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:43 vm08 ceph-mon[82639]: from='mon.2 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["6"], "max": 16}]: dispatch 2026-03-10T13:29:43.571 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:43 vm08 ceph-mon[82639]: Upgrade: osd.6 is safe to restart 2026-03-10T13:29:43.571 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:43 vm08 ceph-mon[82639]: from='client.34361 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:29:43.571 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:43 vm08 ceph-mon[82639]: Upgrade: Updating osd.6 2026-03-10T13:29:43.571 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:43 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:43.571 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:43 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.6"}]: dispatch 2026-03-10T13:29:43.571 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:43 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:29:43.571 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:43 vm08 ceph-mon[82639]: Deploying daemon osd.6 on vm08 2026-03-10T13:29:43.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:43 vm00 ceph-mon[96293]: from='client.54280 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:29:43.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:43 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:43.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:43 vm00 ceph-mon[96293]: from='client.? 192.168.123.100:0/1346728748' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:29:43.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:43 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["6"], "max": 16}]: dispatch 2026-03-10T13:29:43.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:43 vm00 ceph-mon[96293]: from='mon.2 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["6"], "max": 16}]: dispatch 2026-03-10T13:29:43.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:43 vm00 ceph-mon[96293]: Upgrade: osd.6 is safe to restart 2026-03-10T13:29:43.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:43 vm00 ceph-mon[96293]: from='client.34361 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:29:43.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:43 vm00 ceph-mon[96293]: Upgrade: Updating osd.6 2026-03-10T13:29:43.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:43 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:43.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:43 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.6"}]: dispatch 2026-03-10T13:29:43.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:43 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:29:43.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:43 vm00 ceph-mon[96293]: Deploying daemon osd.6 on vm08 2026-03-10T13:29:43.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:43 vm00 ceph-mon[94470]: from='client.54280 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:29:43.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:43 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:43.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:43 vm00 ceph-mon[94470]: from='client.? 192.168.123.100:0/1346728748' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:29:43.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:43 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["6"], "max": 16}]: dispatch 2026-03-10T13:29:43.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:43 vm00 ceph-mon[94470]: from='mon.2 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["6"], "max": 16}]: dispatch 2026-03-10T13:29:43.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:43 vm00 ceph-mon[94470]: Upgrade: osd.6 is safe to restart 2026-03-10T13:29:43.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:43 vm00 ceph-mon[94470]: from='client.34361 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:29:43.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:43 vm00 ceph-mon[94470]: Upgrade: Updating osd.6 2026-03-10T13:29:43.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:43 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:43.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:43 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.6"}]: dispatch 2026-03-10T13:29:43.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:43 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:29:43.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:43 vm00 ceph-mon[94470]: Deploying daemon osd.6 on vm08 2026-03-10T13:29:44.020 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:29:43 vm08 systemd[1]: Stopping Ceph osd.6 for 98a3dada-1c81-11f1-89c9-d57c120f78d5... 2026-03-10T13:29:44.020 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:29:43 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-6[58662]: 2026-03-10T13:29:43.690+0000 7efe457c9700 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.6 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-10T13:29:44.020 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:29:43 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-6[58662]: 2026-03-10T13:29:43.690+0000 7efe457c9700 -1 osd.6 124 *** Got signal Terminated *** 2026-03-10T13:29:44.020 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:29:43 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-6[58662]: 2026-03-10T13:29:43.690+0000 7efe457c9700 -1 osd.6 124 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-10T13:29:44.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:44 vm00 ceph-mon[96293]: pgmap v115: 161 pgs: 161 active+clean; 457 KiB data, 230 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:29:44.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:44 vm00 ceph-mon[96293]: osd.6 marked itself down and dead 2026-03-10T13:29:44.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:44 vm00 ceph-mon[94470]: pgmap v115: 161 pgs: 161 active+clean; 457 KiB data, 230 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:29:44.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:44 vm00 ceph-mon[94470]: osd.6 marked itself down and dead 2026-03-10T13:29:44.753 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:44 vm08 ceph-mon[82639]: pgmap v115: 161 pgs: 161 active+clean; 457 KiB data, 230 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:29:44.754 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:44 vm08 ceph-mon[82639]: osd.6 marked itself down and dead 2026-03-10T13:29:44.754 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:29:44 vm08 podman[98273]: 2026-03-10 13:29:44.56053009 +0000 UTC m=+0.889315197 container died 
c27676916d52de13c76dfb19930ce653464f24be21ffcd995ca88a216353addd (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-6, maintainer=Guillaume Abrioux , GIT_REPO=https://github.com/ceph/ceph-container.git, RELEASE=HEAD, io.buildah.version=1.19.8, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, io.openshift.tags=base centos centos-stream, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, vcs-type=git, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., name=centos-stream, GIT_BRANCH=HEAD, architecture=x86_64, distribution-scope=public, release=754, io.k8s.display-name=CentOS Stream 8, build-date=2022-05-03T08:36:31.336870, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, io.openshift.expose-services=, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, com.redhat.component=centos-stream-container, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, vendor=Red Hat, Inc., GIT_CLEAN=True, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, ceph=True, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, version=8, CEPH_POINT_RELEASE=-17.2.0) 2026-03-10T13:29:44.754 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:29:44 vm08 podman[98273]: 2026-03-10 13:29:44.582424022 +0000 UTC m=+0.911209129 container remove c27676916d52de13c76dfb19930ce653464f24be21ffcd995ca88a216353addd (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-6, GIT_BRANCH=HEAD, RELEASE=HEAD, GIT_CLEAN=True, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, name=centos-stream, build-date=2022-05-03T08:36:31.336870, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, vendor=Red Hat, Inc., ceph=True, io.openshift.tags=base centos centos-stream, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, release=754, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., io.openshift.expose-services=, version=8, GIT_REPO=https://github.com/ceph/ceph-container.git, maintainer=Guillaume Abrioux , CEPH_POINT_RELEASE=-17.2.0, distribution-scope=public, io.buildah.version=1.19.8, io.k8s.display-name=CentOS Stream 8, com.redhat.component=centos-stream-container, architecture=x86_64, vcs-type=git) 2026-03-10T13:29:44.754 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:29:44 vm08 bash[98273]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-6 2026-03-10T13:29:44.754 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:29:44 vm08 podman[98339]: 2026-03-10 13:29:44.729246346 +0000 UTC m=+0.016667833 container create 11ad019da7784a3a222306a7846d4b470a40eb0684b216a15c946411e243173b (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-6-deactivate, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, ceph=True, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, io.buildah.version=1.41.3, OSD_FLAVOR=default) 2026-03-10T13:29:45.021 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:29:44 vm08 podman[98339]: 2026-03-10 13:29:44.771569111 +0000 UTC m=+0.058990598 container init 11ad019da7784a3a222306a7846d4b470a40eb0684b216a15c946411e243173b (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-6-deactivate, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, io.buildah.version=1.41.3) 2026-03-10T13:29:45.021 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:29:44 vm08 podman[98339]: 2026-03-10 13:29:44.77470443 +0000 UTC m=+0.062125907 container start 11ad019da7784a3a222306a7846d4b470a40eb0684b216a15c946411e243173b (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-6-deactivate, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, CEPH_REF=squid, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20260223) 
2026-03-10T13:29:45.021 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:29:44 vm08 podman[98339]: 2026-03-10 13:29:44.776336455 +0000 UTC m=+0.063757942 container attach 11ad019da7784a3a222306a7846d4b470a40eb0684b216a15c946411e243173b (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-6-deactivate, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, OSD_FLAVOR=default, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.vendor=CentOS) 2026-03-10T13:29:45.021 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:29:44 vm08 podman[98339]: 2026-03-10 13:29:44.722613369 +0000 UTC m=+0.010034867 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:29:45.021 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:29:44 vm08 conmon[98352]: conmon 11ad019da7784a3a2223 : Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-11ad019da7784a3a222306a7846d4b470a40eb0684b216a15c946411e243173b.scope/container/memory.events 2026-03-10T13:29:45.021 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:29:44 vm08 podman[98339]: 2026-03-10 13:29:44.910427456 +0000 UTC m=+0.197848952 container died 11ad019da7784a3a222306a7846d4b470a40eb0684b216a15c946411e243173b (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-6-deactivate, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20260223, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, CEPH_REF=squid, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, OSD_FLAVOR=default) 2026-03-10T13:29:45.021 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:29:44 vm08 podman[98339]: 2026-03-10 13:29:44.92996543 +0000 UTC m=+0.217386917 container remove 11ad019da7784a3a222306a7846d4b470a40eb0684b216a15c946411e243173b (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-6-deactivate, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, ceph=True, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.build-date=20260223, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, 
org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid) 2026-03-10T13:29:45.021 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:29:44 vm08 systemd[1]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@osd.6.service: Deactivated successfully. 2026-03-10T13:29:45.021 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:29:44 vm08 systemd[1]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@osd.6.service: Unit process 98352 (conmon) remains running after unit stopped. 2026-03-10T13:29:45.021 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:29:44 vm08 systemd[1]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@osd.6.service: Unit process 98360 (podman) remains running after unit stopped. 2026-03-10T13:29:45.021 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:29:44 vm08 systemd[1]: Stopped Ceph osd.6 for 98a3dada-1c81-11f1-89c9-d57c120f78d5. 2026-03-10T13:29:45.021 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:29:44 vm08 systemd[1]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@osd.6.service: Consumed 6.830s CPU time, 178.2M memory peak. 2026-03-10T13:29:45.286 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:29:45 vm08 systemd[1]: Starting Ceph osd.6 for 98a3dada-1c81-11f1-89c9-d57c120f78d5... 2026-03-10T13:29:45.286 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:29:45 vm08 podman[98439]: 2026-03-10 13:29:45.244106873 +0000 UTC m=+0.026935353 container create b1ce905f5f682b95e57c685e6c9c2490f1510951effdcd9e57aed118068b9f18 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-6-activate, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, io.buildah.version=1.41.3) 2026-03-10T13:29:45.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:45 vm08 ceph-mon[82639]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-10T13:29:45.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:45 vm08 ceph-mon[82639]: osdmap e125: 8 total, 7 up, 8 in 2026-03-10T13:29:45.771 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:29:45 vm08 podman[98439]: 2026-03-10 13:29:45.288100055 +0000 UTC m=+0.070928544 container init b1ce905f5f682b95e57c685e6c9c2490f1510951effdcd9e57aed118068b9f18 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-6-activate, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, CEPH_REF=squid, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True) 2026-03-10T13:29:45.771 
INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:29:45 vm08 podman[98439]: 2026-03-10 13:29:45.302453144 +0000 UTC m=+0.085281633 container start b1ce905f5f682b95e57c685e6c9c2490f1510951effdcd9e57aed118068b9f18 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-6-activate, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, CEPH_REF=squid, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/) 2026-03-10T13:29:45.771 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:29:45 vm08 podman[98439]: 2026-03-10 13:29:45.303499803 +0000 UTC m=+0.086328292 container attach b1ce905f5f682b95e57c685e6c9c2490f1510951effdcd9e57aed118068b9f18 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-6-activate, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, OSD_FLAVOR=default, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_REF=squid, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-10T13:29:45.771 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:29:45 vm08 podman[98439]: 2026-03-10 13:29:45.230758716 +0000 UTC m=+0.013587215 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:29:45.771 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:29:45 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-6-activate[98451]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T13:29:45.771 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:29:45 vm08 bash[98439]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T13:29:45.771 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:29:45 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-6-activate[98451]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T13:29:45.771 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:29:45 vm08 bash[98439]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T13:29:46.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:45 vm00 ceph-mon[96293]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-10T13:29:46.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:45 vm00 ceph-mon[96293]: osdmap e125: 8 total, 7 up, 8 in 2026-03-10T13:29:46.002 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:29:45 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:29:45] "GET /metrics HTTP/1.1" 200 37862 "" "Prometheus/2.51.0" 2026-03-10T13:29:46.003 
INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:45 vm00 ceph-mon[94470]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-10T13:29:46.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:45 vm00 ceph-mon[94470]: osdmap e125: 8 total, 7 up, 8 in 2026-03-10T13:29:46.270 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:29:46 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-6-activate[98451]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-10T13:29:46.271 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:29:46 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-6-activate[98451]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T13:29:46.271 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:29:46 vm08 bash[98439]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-10T13:29:46.271 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:29:46 vm08 bash[98439]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T13:29:46.271 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:29:46 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-6-activate[98451]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T13:29:46.271 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:29:46 vm08 bash[98439]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T13:29:46.271 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:29:46 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-6-activate[98451]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-6 2026-03-10T13:29:46.271 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:29:46 vm08 bash[98439]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-6 2026-03-10T13:29:46.271 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:29:46 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-6-activate[98451]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-8f97367a-091d-4f01-ab50-4a6905540e34/osd-block-8d39583c-2063-4d14-9842-a1a1a8782f74 --path /var/lib/ceph/osd/ceph-6 --no-mon-config 2026-03-10T13:29:46.271 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:29:46 vm08 bash[98439]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-8f97367a-091d-4f01-ab50-4a6905540e34/osd-block-8d39583c-2063-4d14-9842-a1a1a8782f74 --path /var/lib/ceph/osd/ceph-6 --no-mon-config 2026-03-10T13:29:46.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:46 vm08 ceph-mon[82639]: pgmap v117: 161 pgs: 31 peering, 5 stale+active+clean, 125 active+clean; 457 KiB data, 230 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:29:46.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:46 vm08 ceph-mon[82639]: Health check failed: Reduced data availability: 1 pg inactive, 5 pgs peering (PG_AVAILABILITY) 2026-03-10T13:29:46.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:46 vm08 ceph-mon[82639]: osdmap e126: 8 total, 7 up, 8 in 2026-03-10T13:29:46.771 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:29:46 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-6-activate[98451]: Running command: /usr/bin/ln -snf /dev/ceph-8f97367a-091d-4f01-ab50-4a6905540e34/osd-block-8d39583c-2063-4d14-9842-a1a1a8782f74 /var/lib/ceph/osd/ceph-6/block 2026-03-10T13:29:46.771 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:29:46 vm08 bash[98439]: Running command: /usr/bin/ln -snf /dev/ceph-8f97367a-091d-4f01-ab50-4a6905540e34/osd-block-8d39583c-2063-4d14-9842-a1a1a8782f74 
/var/lib/ceph/osd/ceph-6/block 2026-03-10T13:29:46.771 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:29:46 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-6-activate[98451]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-6/block 2026-03-10T13:29:46.771 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:29:46 vm08 bash[98439]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-6/block 2026-03-10T13:29:46.771 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:29:46 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-6-activate[98451]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-2 2026-03-10T13:29:46.771 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:29:46 vm08 bash[98439]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-2 2026-03-10T13:29:46.771 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:29:46 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-6-activate[98451]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-6 2026-03-10T13:29:46.771 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:29:46 vm08 bash[98439]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-6 2026-03-10T13:29:46.771 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:29:46 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-6-activate[98451]: --> ceph-volume lvm activate successful for osd ID: 6 2026-03-10T13:29:46.771 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:29:46 vm08 bash[98439]: --> ceph-volume lvm activate successful for osd ID: 6 2026-03-10T13:29:46.771 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:29:46 vm08 conmon[98451]: conmon b1ce905f5f682b95e57c : Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-b1ce905f5f682b95e57c685e6c9c2490f1510951effdcd9e57aed118068b9f18.scope/container/memory.events 2026-03-10T13:29:46.771 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:29:46 vm08 podman[98439]: 2026-03-10 13:29:46.420707729 +0000 UTC m=+1.203536218 container died b1ce905f5f682b95e57c685e6c9c2490f1510951effdcd9e57aed118068b9f18 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-6-activate, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.build-date=20260223, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.schema-version=1.0, ceph=True) 2026-03-10T13:29:46.771 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:29:46 vm08 podman[98439]: 2026-03-10 13:29:46.458972287 +0000 UTC m=+1.241800777 container remove b1ce905f5f682b95e57c685e6c9c2490f1510951effdcd9e57aed118068b9f18 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-6-activate, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, OSD_FLAVOR=default, ceph=True, 
org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20260223, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df) 2026-03-10T13:29:46.771 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:29:46 vm08 podman[98711]: 2026-03-10 13:29:46.577663049 +0000 UTC m=+0.018739280 container create 529ca6d92bc210f750ab59898d4dbafcf91309c2f3454a27cf34d159d127696e (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-6, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, io.buildah.version=1.41.3) 2026-03-10T13:29:46.771 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:29:46 vm08 podman[98711]: 2026-03-10 13:29:46.610686157 +0000 UTC m=+0.051762398 container init 529ca6d92bc210f750ab59898d4dbafcf91309c2f3454a27cf34d159d127696e (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-6, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-10T13:29:46.771 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:29:46 vm08 podman[98711]: 2026-03-10 13:29:46.617562318 +0000 UTC m=+0.058638559 container start 529ca6d92bc210f750ab59898d4dbafcf91309c2f3454a27cf34d159d127696e (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-6, ceph=True, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default) 2026-03-10T13:29:46.772 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:29:46 vm08 bash[98711]: 529ca6d92bc210f750ab59898d4dbafcf91309c2f3454a27cf34d159d127696e 2026-03-10T13:29:46.772 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:29:46 vm08 podman[98711]: 2026-03-10 
13:29:46.571294106 +0000 UTC m=+0.012370358 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:29:46.772 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:29:46 vm08 systemd[1]: Started Ceph osd.6 for 98a3dada-1c81-11f1-89c9-d57c120f78d5. 2026-03-10T13:29:47.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:46 vm00 ceph-mon[94470]: pgmap v117: 161 pgs: 31 peering, 5 stale+active+clean, 125 active+clean; 457 KiB data, 230 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:29:47.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:46 vm00 ceph-mon[94470]: Health check failed: Reduced data availability: 1 pg inactive, 5 pgs peering (PG_AVAILABILITY) 2026-03-10T13:29:47.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:46 vm00 ceph-mon[94470]: osdmap e126: 8 total, 7 up, 8 in 2026-03-10T13:29:47.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:46 vm00 ceph-mon[96293]: pgmap v117: 161 pgs: 31 peering, 5 stale+active+clean, 125 active+clean; 457 KiB data, 230 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:29:47.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:46 vm00 ceph-mon[96293]: Health check failed: Reduced data availability: 1 pg inactive, 5 pgs peering (PG_AVAILABILITY) 2026-03-10T13:29:47.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:46 vm00 ceph-mon[96293]: osdmap e126: 8 total, 7 up, 8 in 2026-03-10T13:29:47.502 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:29:47 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:29:47.004Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:29:47.502 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:29:47 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:29:47.005Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:29:47.657 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:29:47 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-6[98722]: 2026-03-10T13:29:47.445+0000 7f60fd416740 -1 Falling back to public interface 2026-03-10T13:29:47.927 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:47 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:47.927 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:47 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:48.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:47 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:48.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:47 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:48.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:47 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:48.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:47 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:48.663 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 
13:29:48 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-6[98722]: 2026-03-10T13:29:48.321+0000 7f60fd416740 -1 osd.6 0 read_superblock omap replica is missing. 2026-03-10T13:29:48.663 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:29:48 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-6[98722]: 2026-03-10T13:29:48.358+0000 7f60fd416740 -1 osd.6 124 log_to_monitors true 2026-03-10T13:29:48.663 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:48 vm08 ceph-mon[82639]: pgmap v119: 161 pgs: 5 active+undersized, 31 peering, 3 stale+active+clean, 4 active+undersized+degraded, 118 active+clean; 457 KiB data, 230 MiB used, 160 GiB / 160 GiB avail; 12/627 objects degraded (1.914%) 2026-03-10T13:29:48.663 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:48 vm08 ceph-mon[82639]: Health check failed: Degraded data redundancy: 12/627 objects degraded (1.914%), 4 pgs degraded (PG_DEGRADED) 2026-03-10T13:29:48.663 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:48 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:48.663 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:48 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:48.663 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:48 vm08 ceph-mon[82639]: from='osd.6 [v2:192.168.123.108:6816/1574751781,v1:192.168.123.108:6817/1574751781]' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch 2026-03-10T13:29:48.663 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:48 vm08 ceph-mon[82639]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch 2026-03-10T13:29:48.663 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:48 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:48.663 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:48 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:49.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:48 vm00 ceph-mon[94470]: pgmap v119: 161 pgs: 5 active+undersized, 31 peering, 3 stale+active+clean, 4 active+undersized+degraded, 118 active+clean; 457 KiB data, 230 MiB used, 160 GiB / 160 GiB avail; 12/627 objects degraded (1.914%) 2026-03-10T13:29:49.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:48 vm00 ceph-mon[94470]: Health check failed: Degraded data redundancy: 12/627 objects degraded (1.914%), 4 pgs degraded (PG_DEGRADED) 2026-03-10T13:29:49.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:48 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:49.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:48 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:49.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:48 vm00 ceph-mon[94470]: from='osd.6 [v2:192.168.123.108:6816/1574751781,v1:192.168.123.108:6817/1574751781]' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch 2026-03-10T13:29:49.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:48 vm00 ceph-mon[94470]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch 2026-03-10T13:29:49.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:48 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:49.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:48 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:49.003 
INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:48 vm00 ceph-mon[96293]: pgmap v119: 161 pgs: 5 active+undersized, 31 peering, 3 stale+active+clean, 4 active+undersized+degraded, 118 active+clean; 457 KiB data, 230 MiB used, 160 GiB / 160 GiB avail; 12/627 objects degraded (1.914%) 2026-03-10T13:29:49.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:48 vm00 ceph-mon[96293]: Health check failed: Degraded data redundancy: 12/627 objects degraded (1.914%), 4 pgs degraded (PG_DEGRADED) 2026-03-10T13:29:49.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:48 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:49.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:48 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:49.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:48 vm00 ceph-mon[96293]: from='osd.6 [v2:192.168.123.108:6816/1574751781,v1:192.168.123.108:6817/1574751781]' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch 2026-03-10T13:29:49.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:48 vm00 ceph-mon[96293]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch 2026-03-10T13:29:49.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:48 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:49.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:48 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:50.003 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:29:49 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:29:49.583+0000 7f1445d54640 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (20 PGs are or would become offline) 2026-03-10T13:29:50.270 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:29:50 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-6[98722]: 2026-03-10T13:29:50.005+0000 7f60f49c0640 -1 osd.6 124 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-10T13:29:50.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:50 vm08 ceph-mon[82639]: from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]': finished 2026-03-10T13:29:50.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:50 vm08 ceph-mon[82639]: osdmap e127: 8 total, 7 up, 8 in 2026-03-10T13:29:50.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:50 vm08 ceph-mon[82639]: from='osd.6 [v2:192.168.123.108:6816/1574751781,v1:192.168.123.108:6817/1574751781]' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm08", "root=default"]}]: dispatch 2026-03-10T13:29:50.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:50 vm08 ceph-mon[82639]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm08", "root=default"]}]: dispatch 2026-03-10T13:29:50.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:50 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:50.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:50 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:50.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:50 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 
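Taken together, the journal entries above record the complete container swap for osd.6: the 17.2.0 container receives SIGTERM and exits, short-lived "-deactivate" and "-activate" helper containers run from the new quay.ceph.io/ceph-ci image (ending with "ceph-volume lvm activate successful for osd ID: 6"), the systemd unit is started again on the new image, and the daemon re-registers its device class and CRUSH position. cephadm drives all of this itself; the sketch below only restates that sequence using the unit name that appears in this log and is not meant to be run by hand.

    # Illustrative outline of the per-OSD redeploy recorded above (cephadm does
    # this internally during `ceph orch upgrade`; do not run against the cluster).
    fsid=98a3dada-1c81-11f1-89c9-d57c120f78d5
    systemctl stop "ceph-${fsid}@osd.6.service"    # old 17.2.0 container is terminated
    # helper containers from the new image prepare the OSD data dir:
    #   ceph-${fsid}-osd-6-deactivate
    #   ceph-${fsid}-osd-6-activate   (ceph-volume lvm activate for OSD 6)
    systemctl start "ceph-${fsid}@osd.6.service"   # new 19.2.3 container starts; osd.6 boots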
2026-03-10T13:29:50.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:50 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:29:50.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:50 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:50.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:50 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:29:50.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:50 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:29:50.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:50 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:29:50.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:50 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:29:50.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:50 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["7"], "max": 16}]: dispatch 2026-03-10T13:29:50.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:50 vm00 ceph-mon[94470]: from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]': finished 2026-03-10T13:29:50.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:50 vm00 ceph-mon[94470]: osdmap e127: 8 total, 7 up, 8 in 2026-03-10T13:29:50.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:50 vm00 ceph-mon[94470]: from='osd.6 [v2:192.168.123.108:6816/1574751781,v1:192.168.123.108:6817/1574751781]' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm08", "root=default"]}]: dispatch 2026-03-10T13:29:50.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:50 vm00 ceph-mon[94470]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm08", "root=default"]}]: dispatch 2026-03-10T13:29:50.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:50 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:50.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:50 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:50.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:50 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:29:50.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:50 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:29:50.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:50 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:50.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:50 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:29:50.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:50 vm00 ceph-mon[94470]: from='mgr.44106 
192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:29:50.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:50 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:29:50.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:50 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:29:50.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:50 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["7"], "max": 16}]: dispatch 2026-03-10T13:29:50.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:50 vm00 ceph-mon[96293]: from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]': finished 2026-03-10T13:29:50.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:50 vm00 ceph-mon[96293]: osdmap e127: 8 total, 7 up, 8 in 2026-03-10T13:29:50.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:50 vm00 ceph-mon[96293]: from='osd.6 [v2:192.168.123.108:6816/1574751781,v1:192.168.123.108:6817/1574751781]' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm08", "root=default"]}]: dispatch 2026-03-10T13:29:50.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:50 vm00 ceph-mon[96293]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm08", "root=default"]}]: dispatch 2026-03-10T13:29:50.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:50 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:50.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:50 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:50.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:50 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:29:50.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:50 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:29:50.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:50 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:29:50.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:50 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:29:50.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:50 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:29:50.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:50 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:29:50.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:50 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:29:50.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:50 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["7"], "max": 16}]: dispatch 
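The mon audit entries above show mgr.y repeatedly dispatching "versions" and "osd ok-to-stop" for osd.7 while deciding whether the next daemon in the staggered upgrade can be restarted. A hedged, operator-side sketch of the same checks with standard ceph CLI calls (the --max value only mirrors the dispatched command, it is not required):

    # Report the running version of every daemon class, as the mgr's "versions" dispatch does
    ceph versions
    # Ask whether osd.7 (and up to 16 peers) could be stopped without making PGs unavailable
    ceph osd ok-to-stop 7 --max 16
    # Follow the orchestrator's view of the staggered upgrade
    ceph orch upgrade status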
2026-03-10T13:29:51.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:51 vm00 ceph-mon[94470]: pgmap v121: 161 pgs: 14 active+undersized, 31 peering, 8 active+undersized+degraded, 108 active+clean; 457 KiB data, 248 MiB used, 160 GiB / 160 GiB avail; 29/627 objects degraded (4.625%) 2026-03-10T13:29:51.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:51 vm00 ceph-mon[94470]: from='mon.2 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["7"], "max": 16}]: dispatch 2026-03-10T13:29:51.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:51 vm00 ceph-mon[94470]: Upgrade: unsafe to stop osd(s) at this time (20 PGs are or would become offline) 2026-03-10T13:29:51.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:51 vm00 ceph-mon[94470]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-10T13:29:51.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:51 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T13:29:51.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:51 vm00 ceph-mon[94470]: osd.6 [v2:192.168.123.108:6816/1574751781,v1:192.168.123.108:6817/1574751781] boot 2026-03-10T13:29:51.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:51 vm00 ceph-mon[94470]: osdmap e128: 8 total, 8 up, 8 in 2026-03-10T13:29:51.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:51 vm00 ceph-mon[96293]: pgmap v121: 161 pgs: 14 active+undersized, 31 peering, 8 active+undersized+degraded, 108 active+clean; 457 KiB data, 248 MiB used, 160 GiB / 160 GiB avail; 29/627 objects degraded (4.625%) 2026-03-10T13:29:51.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:51 vm00 ceph-mon[96293]: from='mon.2 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["7"], "max": 16}]: dispatch 2026-03-10T13:29:51.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:51 vm00 ceph-mon[96293]: Upgrade: unsafe to stop osd(s) at this time (20 PGs are or would become offline) 2026-03-10T13:29:51.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:51 vm00 ceph-mon[96293]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-10T13:29:51.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:51 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T13:29:51.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:51 vm00 ceph-mon[96293]: osd.6 [v2:192.168.123.108:6816/1574751781,v1:192.168.123.108:6817/1574751781] boot 2026-03-10T13:29:51.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:51 vm00 ceph-mon[96293]: osdmap e128: 8 total, 8 up, 8 in 2026-03-10T13:29:51.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:51 vm08 ceph-mon[82639]: pgmap v121: 161 pgs: 14 active+undersized, 31 peering, 8 active+undersized+degraded, 108 active+clean; 457 KiB data, 248 MiB used, 160 GiB / 160 GiB avail; 29/627 objects degraded (4.625%) 2026-03-10T13:29:51.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:51 vm08 ceph-mon[82639]: from='mon.2 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["7"], "max": 16}]: dispatch 2026-03-10T13:29:51.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:51 vm08 ceph-mon[82639]: Upgrade: unsafe to stop osd(s) at this time (20 PGs are or would become offline) 2026-03-10T13:29:51.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:51 vm08 ceph-mon[82639]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-10T13:29:51.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:51 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T13:29:51.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:51 vm08 ceph-mon[82639]: osd.6 [v2:192.168.123.108:6816/1574751781,v1:192.168.123.108:6817/1574751781] boot 2026-03-10T13:29:51.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:51 vm08 ceph-mon[82639]: osdmap e128: 8 total, 8 up, 8 in 2026-03-10T13:29:52.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:52 vm00 ceph-mon[94470]: OSD bench result of 31625.118377 IOPS is not within the threshold limit range of 50.000000 IOPS and 500.000000 IOPS for osd.6. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd]. 2026-03-10T13:29:52.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:52 vm00 ceph-mon[94470]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:29:52.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:52 vm00 ceph-mon[94470]: osdmap e129: 8 total, 8 up, 8 in 2026-03-10T13:29:52.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:52 vm00 ceph-mon[96293]: OSD bench result of 31625.118377 IOPS is not within the threshold limit range of 50.000000 IOPS and 500.000000 IOPS for osd.6. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd]. 2026-03-10T13:29:52.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:52 vm00 ceph-mon[96293]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:29:52.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:52 vm00 ceph-mon[96293]: osdmap e129: 8 total, 8 up, 8 in 2026-03-10T13:29:52.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:52 vm08 ceph-mon[82639]: OSD bench result of 31625.118377 IOPS is not within the threshold limit range of 50.000000 IOPS and 500.000000 IOPS for osd.6. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd]. 
2026-03-10T13:29:52.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:52 vm08 ceph-mon[82639]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:29:52.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:52 vm08 ceph-mon[82639]: osdmap e129: 8 total, 8 up, 8 in 2026-03-10T13:29:53.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:53 vm00 ceph-mon[94470]: pgmap v123: 161 pgs: 34 active+undersized, 19 active+undersized+degraded, 108 active+clean; 457 KiB data, 248 MiB used, 160 GiB / 160 GiB avail; 682 B/s rd, 0 op/s; 75/627 objects degraded (11.962%) 2026-03-10T13:29:53.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:53 vm00 ceph-mon[94470]: Health check cleared: PG_AVAILABILITY (was: Reduced data availability: 1 pg inactive, 5 pgs peering) 2026-03-10T13:29:53.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:53 vm00 ceph-mon[96293]: pgmap v123: 161 pgs: 34 active+undersized, 19 active+undersized+degraded, 108 active+clean; 457 KiB data, 248 MiB used, 160 GiB / 160 GiB avail; 682 B/s rd, 0 op/s; 75/627 objects degraded (11.962%) 2026-03-10T13:29:53.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:53 vm00 ceph-mon[96293]: Health check cleared: PG_AVAILABILITY (was: Reduced data availability: 1 pg inactive, 5 pgs peering) 2026-03-10T13:29:53.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:53 vm08 ceph-mon[82639]: pgmap v123: 161 pgs: 34 active+undersized, 19 active+undersized+degraded, 108 active+clean; 457 KiB data, 248 MiB used, 160 GiB / 160 GiB avail; 682 B/s rd, 0 op/s; 75/627 objects degraded (11.962%) 2026-03-10T13:29:53.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:53 vm08 ceph-mon[82639]: Health check cleared: PG_AVAILABILITY (was: Reduced data availability: 1 pg inactive, 5 pgs peering) 2026-03-10T13:29:54.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:54 vm00 ceph-mon[94470]: Health check update: Degraded data redundancy: 75/627 objects degraded (11.962%), 19 pgs degraded (PG_DEGRADED) 2026-03-10T13:29:54.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:54 vm00 ceph-mon[96293]: Health check update: Degraded data redundancy: 75/627 objects degraded (11.962%), 19 pgs degraded (PG_DEGRADED) 2026-03-10T13:29:54.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:54 vm08 ceph-mon[82639]: Health check update: Degraded data redundancy: 75/627 objects degraded (11.962%), 19 pgs degraded (PG_DEGRADED) 2026-03-10T13:29:55.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:55 vm00 ceph-mon[94470]: pgmap v125: 161 pgs: 30 active+undersized, 16 active+undersized+degraded, 115 active+clean; 457 KiB data, 248 MiB used, 160 GiB / 160 GiB avail; 682 B/s rd, 0 op/s; 64/627 objects degraded (10.207%) 2026-03-10T13:29:55.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:55 vm00 ceph-mon[96293]: pgmap v125: 161 pgs: 30 active+undersized, 16 active+undersized+degraded, 115 active+clean; 457 KiB data, 248 MiB used, 160 GiB / 160 GiB avail; 682 B/s rd, 0 op/s; 64/627 objects degraded (10.207%) 2026-03-10T13:29:55.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:55 vm08 ceph-mon[82639]: pgmap v125: 161 pgs: 30 active+undersized, 16 active+undersized+degraded, 115 active+clean; 457 KiB data, 248 MiB used, 160 GiB / 160 GiB avail; 682 B/s rd, 0 op/s; 64/627 objects degraded (10.207%) 2026-03-10T13:29:56.002 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:29:55 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 
::ffff:192.168.123.108 - - [10/Mar/2026:13:29:55] "GET /metrics HTTP/1.1" 200 37862 "" "Prometheus/2.51.0" 2026-03-10T13:29:56.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:56 vm00 ceph-mon[94470]: pgmap v126: 161 pgs: 161 active+clean; 457 KiB data, 649 MiB used, 159 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-10T13:29:56.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:56 vm00 ceph-mon[96293]: pgmap v126: 161 pgs: 161 active+clean; 457 KiB data, 649 MiB used, 159 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-10T13:29:56.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:56 vm08 ceph-mon[82639]: pgmap v126: 161 pgs: 161 active+clean; 457 KiB data, 649 MiB used, 159 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-10T13:29:57.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:57 vm00 ceph-mon[94470]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 64/627 objects degraded (10.207%), 16 pgs degraded) 2026-03-10T13:29:57.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:57 vm00 ceph-mon[94470]: Cluster is now healthy 2026-03-10T13:29:57.503 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:29:57 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:29:57.004Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:29:57.503 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:29:57 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:29:57.005Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:29:57.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:57 vm00 ceph-mon[96293]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 64/627 objects degraded (10.207%), 16 pgs degraded) 2026-03-10T13:29:57.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:57 vm00 ceph-mon[96293]: Cluster is now healthy 2026-03-10T13:29:57.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:57 vm08 ceph-mon[82639]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 64/627 objects degraded (10.207%), 16 pgs degraded) 2026-03-10T13:29:57.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:57 vm08 ceph-mon[82639]: Cluster is now healthy 2026-03-10T13:29:58.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:58 vm00 ceph-mon[94470]: pgmap v127: 161 pgs: 161 active+clean; 457 KiB data, 649 MiB used, 159 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-10T13:29:58.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:29:58 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:29:58.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:58 vm00 ceph-mon[96293]: pgmap v127: 161 pgs: 161 active+clean; 457 KiB data, 649 MiB used, 159 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-10T13:29:58.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:29:58 vm00 ceph-mon[96293]: from='mgr.44106 
192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:29:58.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:58 vm08 ceph-mon[82639]: pgmap v127: 161 pgs: 161 active+clean; 457 KiB data, 649 MiB used, 159 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-10T13:29:58.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:29:58 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:30:00.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:00 vm00 ceph-mon[94470]: pgmap v128: 161 pgs: 161 active+clean; 457 KiB data, 249 MiB used, 160 GiB / 160 GiB avail; 348 B/s rd, 0 op/s 2026-03-10T13:30:00.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:00 vm00 ceph-mon[94470]: overall HEALTH_OK 2026-03-10T13:30:00.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:00 vm00 ceph-mon[96293]: pgmap v128: 161 pgs: 161 active+clean; 457 KiB data, 249 MiB used, 160 GiB / 160 GiB avail; 348 B/s rd, 0 op/s 2026-03-10T13:30:00.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:00 vm00 ceph-mon[96293]: overall HEALTH_OK 2026-03-10T13:30:00.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:00 vm08 ceph-mon[82639]: pgmap v128: 161 pgs: 161 active+clean; 457 KiB data, 249 MiB used, 160 GiB / 160 GiB avail; 348 B/s rd, 0 op/s 2026-03-10T13:30:00.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:00 vm08 ceph-mon[82639]: overall HEALTH_OK 2026-03-10T13:30:01.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:01 vm00 ceph-mon[94470]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:30:01.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:01 vm00 ceph-mon[96293]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:30:01.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:01 vm08 ceph-mon[82639]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:30:02.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:02 vm00 ceph-mon[94470]: pgmap v129: 161 pgs: 161 active+clean; 457 KiB data, 249 MiB used, 160 GiB / 160 GiB avail; 819 B/s rd, 0 op/s 2026-03-10T13:30:02.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:02 vm00 ceph-mon[96293]: pgmap v129: 161 pgs: 161 active+clean; 457 KiB data, 249 MiB used, 160 GiB / 160 GiB avail; 819 B/s rd, 0 op/s 2026-03-10T13:30:02.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:02 vm08 ceph-mon[82639]: pgmap v129: 161 pgs: 161 active+clean; 457 KiB data, 249 MiB used, 160 GiB / 160 GiB avail; 819 B/s rd, 0 op/s 2026-03-10T13:30:03.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:03 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:03.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:03 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:03.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:03 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:04.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:04 vm00 ceph-mon[94470]: pgmap v130: 161 pgs: 161 active+clean; 457 KiB data, 249 MiB used, 160 GiB / 160 GiB avail; 693 B/s rd, 0 op/s 2026-03-10T13:30:04.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:04 vm00 
ceph-mon[96293]: pgmap v130: 161 pgs: 161 active+clean; 457 KiB data, 249 MiB used, 160 GiB / 160 GiB avail; 693 B/s rd, 0 op/s 2026-03-10T13:30:04.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:04 vm08 ceph-mon[82639]: pgmap v130: 161 pgs: 161 active+clean; 457 KiB data, 249 MiB used, 160 GiB / 160 GiB avail; 693 B/s rd, 0 op/s 2026-03-10T13:30:05.670 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:30:05 vm08 systemd[1]: Stopping Ceph osd.7 for 98a3dada-1c81-11f1-89c9-d57c120f78d5... 2026-03-10T13:30:05.670 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:05 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["7"], "max": 16}]: dispatch 2026-03-10T13:30:05.670 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:05 vm08 ceph-mon[82639]: from='mon.2 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["7"], "max": 16}]: dispatch 2026-03-10T13:30:05.670 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:05 vm08 ceph-mon[82639]: Upgrade: osd.7 is safe to restart 2026-03-10T13:30:05.670 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:05 vm08 ceph-mon[82639]: Upgrade: Updating osd.7 2026-03-10T13:30:05.670 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:05 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:05.670 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:05 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.7"}]: dispatch 2026-03-10T13:30:05.670 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:05 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:30:05.670 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:05 vm08 ceph-mon[82639]: Deploying daemon osd.7 on vm08 2026-03-10T13:30:05.690 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:05 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["7"], "max": 16}]: dispatch 2026-03-10T13:30:05.690 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:05 vm00 ceph-mon[94470]: from='mon.2 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["7"], "max": 16}]: dispatch 2026-03-10T13:30:05.690 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:05 vm00 ceph-mon[94470]: Upgrade: osd.7 is safe to restart 2026-03-10T13:30:05.690 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:05 vm00 ceph-mon[94470]: Upgrade: Updating osd.7 2026-03-10T13:30:05.690 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:05 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:05.690 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:05 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.7"}]: dispatch 2026-03-10T13:30:05.690 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:05 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:30:05.690 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:05 vm00 ceph-mon[94470]: Deploying daemon osd.7 on vm08 2026-03-10T13:30:05.690 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:05 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["7"], "max": 16}]: dispatch 2026-03-10T13:30:05.690 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:05 vm00 ceph-mon[96293]: from='mon.2 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["7"], "max": 16}]: dispatch 2026-03-10T13:30:05.690 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:05 vm00 ceph-mon[96293]: Upgrade: osd.7 is safe to restart 2026-03-10T13:30:05.690 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:05 vm00 ceph-mon[96293]: Upgrade: Updating osd.7 2026-03-10T13:30:05.690 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:05 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:05.690 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:05 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.7"}]: dispatch 2026-03-10T13:30:05.690 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:05 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:30:05.690 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:05 vm00 ceph-mon[96293]: Deploying daemon osd.7 on vm08 2026-03-10T13:30:06.002 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:30:05 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:30:05] "GET /metrics HTTP/1.1" 200 37870 "" "Prometheus/2.51.0" 2026-03-10T13:30:06.020 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:30:05 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7[61424]: 2026-03-10T13:30:05.745+0000 7fe4dc783700 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.7 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-10T13:30:06.020 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:30:05 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7[61424]: 2026-03-10T13:30:05.745+0000 7fe4dc783700 -1 osd.7 129 *** Got signal Terminated *** 2026-03-10T13:30:06.020 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:30:05 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7[61424]: 2026-03-10T13:30:05.745+0000 7fe4dc783700 -1 osd.7 129 *** Immediate shutdown (osd_fast_shutdown=true) 
*** 2026-03-10T13:30:06.702 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:30:06 vm08 podman[102406]: 2026-03-10 13:30:06.47726001 +0000 UTC m=+0.747289073 container died bf67951990a5cae1fdaf47d4e5560633e05d0dfa7907799c1fe6a1dcea2d3f74 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7, GIT_REPO=https://github.com/ceph/ceph-container.git, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, io.openshift.expose-services=, vcs-type=git, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, com.redhat.component=centos-stream-container, io.openshift.tags=base centos centos-stream, io.k8s.display-name=CentOS Stream 8, release=754, RELEASE=HEAD, GIT_BRANCH=HEAD, distribution-scope=public, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, maintainer=Guillaume Abrioux , ceph=True, io.buildah.version=1.19.8, name=centos-stream, build-date=2022-05-03T08:36:31.336870, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, version=8, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, CEPH_POINT_RELEASE=-17.2.0, GIT_CLEAN=True, architecture=x86_64, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, vendor=Red Hat, Inc.) 2026-03-10T13:30:06.702 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:30:06 vm08 podman[102406]: 2026-03-10 13:30:06.500768657 +0000 UTC m=+0.770797720 container remove bf67951990a5cae1fdaf47d4e5560633e05d0dfa7907799c1fe6a1dcea2d3f74 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, GIT_BRANCH=HEAD, GIT_REPO=https://github.com/ceph/ceph-container.git, com.redhat.component=centos-stream-container, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., maintainer=Guillaume Abrioux , distribution-scope=public, release=754, ceph=True, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, io.buildah.version=1.19.8, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, vcs-type=git, name=centos-stream, io.openshift.expose-services=, io.openshift.tags=base centos centos-stream, RELEASE=HEAD, build-date=2022-05-03T08:36:31.336870, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, architecture=x86_64, io.k8s.display-name=CentOS Stream 8, vendor=Red Hat, Inc., description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. 
This image takes the Red Hat UBI and layers on content from CentOS Stream, GIT_CLEAN=True, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, version=8, CEPH_POINT_RELEASE=-17.2.0) 2026-03-10T13:30:06.702 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:30:06 vm08 bash[102406]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7 2026-03-10T13:30:06.702 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:30:06 vm08 podman[102471]: 2026-03-10 13:30:06.675633474 +0000 UTC m=+0.020820215 container create 28b21714f5f87eb004999b1cc7046fc4dd1c2d232c80317d5d13463a15545d7e (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7-deactivate, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20260223, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-10T13:30:06.702 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:06 vm08 ceph-mon[82639]: pgmap v131: 161 pgs: 161 active+clean; 457 KiB data, 249 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-10T13:30:06.702 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:06 vm08 ceph-mon[82639]: osd.7 marked itself down and dead 2026-03-10T13:30:06.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:06 vm00 ceph-mon[94470]: pgmap v131: 161 pgs: 161 active+clean; 457 KiB data, 249 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-10T13:30:06.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:06 vm00 ceph-mon[94470]: osd.7 marked itself down and dead 2026-03-10T13:30:06.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:06 vm00 ceph-mon[96293]: pgmap v131: 161 pgs: 161 active+clean; 457 KiB data, 249 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-10T13:30:06.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:06 vm00 ceph-mon[96293]: osd.7 marked itself down and dead 2026-03-10T13:30:06.986 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:30:06 vm08 podman[102471]: 2026-03-10 13:30:06.720167098 +0000 UTC m=+0.065353839 container init 28b21714f5f87eb004999b1cc7046fc4dd1c2d232c80317d5d13463a15545d7e (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7-deactivate, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.label-schema.build-date=20260223, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0) 2026-03-10T13:30:06.986 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:30:06 vm08 podman[102471]: 2026-03-10 
13:30:06.72399811 +0000 UTC m=+0.069184851 container start 28b21714f5f87eb004999b1cc7046fc4dd1c2d232c80317d5d13463a15545d7e (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7-deactivate, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, ceph=True, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2) 2026-03-10T13:30:06.987 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:30:06 vm08 podman[102471]: 2026-03-10 13:30:06.728200837 +0000 UTC m=+0.073387578 container attach 28b21714f5f87eb004999b1cc7046fc4dd1c2d232c80317d5d13463a15545d7e (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7-deactivate, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/) 2026-03-10T13:30:06.987 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:30:06 vm08 podman[102471]: 2026-03-10 13:30:06.66549246 +0000 UTC m=+0.010679211 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:30:06.987 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:30:06 vm08 podman[102471]: 2026-03-10 13:30:06.863001396 +0000 UTC m=+0.208188126 container died 28b21714f5f87eb004999b1cc7046fc4dd1c2d232c80317d5d13463a15545d7e (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7-deactivate, org.label-schema.build-date=20260223, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, ceph=True, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.schema-version=1.0, CEPH_REF=squid) 2026-03-10T13:30:06.987 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:30:06 vm08 podman[102471]: 2026-03-10 13:30:06.891403302 +0000 UTC m=+0.236590043 container remove 28b21714f5f87eb004999b1cc7046fc4dd1c2d232c80317d5d13463a15545d7e (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, 
name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7-deactivate, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0) 2026-03-10T13:30:06.987 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:30:06 vm08 systemd[1]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@osd.7.service: Deactivated successfully. 2026-03-10T13:30:06.987 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:30:06 vm08 systemd[1]: Stopped Ceph osd.7 for 98a3dada-1c81-11f1-89c9-d57c120f78d5. 2026-03-10T13:30:06.987 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:30:06 vm08 systemd[1]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@osd.7.service: Consumed 7.723s CPU time. 2026-03-10T13:30:07.369 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:30:07 vm08 systemd[1]: Starting Ceph osd.7 for 98a3dada-1c81-11f1-89c9-d57c120f78d5... 2026-03-10T13:30:07.369 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:30:07 vm08 podman[102573]: 2026-03-10 13:30:07.239487837 +0000 UTC m=+0.073811973 container create a7c54ce03b96ed32fe496421a71589a03b029a2cd130a4994e02cf89082a7220 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7-activate, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, org.label-schema.build-date=20260223, io.buildah.version=1.41.3, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-10T13:30:07.369 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:30:07 vm08 podman[102573]: 2026-03-10 13:30:07.17523003 +0000 UTC m=+0.009554176 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:30:07.369 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:30:07 vm08 podman[102573]: 2026-03-10 13:30:07.282885174 +0000 UTC m=+0.117209320 container init a7c54ce03b96ed32fe496421a71589a03b029a2cd130a4994e02cf89082a7220 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7-activate, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.41.3, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.build-date=20260223, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_REF=squid, 
org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/) 2026-03-10T13:30:07.369 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:30:07 vm08 podman[102573]: 2026-03-10 13:30:07.286465987 +0000 UTC m=+0.120790123 container start a7c54ce03b96ed32fe496421a71589a03b029a2cd130a4994e02cf89082a7220 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7-activate, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223) 2026-03-10T13:30:07.369 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:30:07 vm08 podman[102573]: 2026-03-10 13:30:07.287669851 +0000 UTC m=+0.121993976 container attach a7c54ce03b96ed32fe496421a71589a03b029a2cd130a4994e02cf89082a7220 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7-activate, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_REF=squid, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3) 2026-03-10T13:30:07.436 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:30:07 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:30:07.005Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:30:07.436 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:30:07 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:30:07.007Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:30:07.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:07 vm00 ceph-mon[94470]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-10T13:30:07.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:07 vm00 ceph-mon[94470]: Health check failed: all OSDs are running squid or later but require_osd_release < squid (OSD_UPGRADE_FINISHED) 2026-03-10T13:30:07.752 
INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:07 vm00 ceph-mon[94470]: osdmap e130: 8 total, 7 up, 8 in 2026-03-10T13:30:07.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:07 vm00 ceph-mon[96293]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-10T13:30:07.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:07 vm00 ceph-mon[96293]: Health check failed: all OSDs are running squid or later but require_osd_release < squid (OSD_UPGRADE_FINISHED) 2026-03-10T13:30:07.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:07 vm00 ceph-mon[96293]: osdmap e130: 8 total, 7 up, 8 in 2026-03-10T13:30:07.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:07 vm08 ceph-mon[82639]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-10T13:30:07.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:07 vm08 ceph-mon[82639]: Health check failed: all OSDs are running squid or later but require_osd_release < squid (OSD_UPGRADE_FINISHED) 2026-03-10T13:30:07.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:07 vm08 ceph-mon[82639]: osdmap e130: 8 total, 7 up, 8 in 2026-03-10T13:30:07.771 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:30:07 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7-activate[102584]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T13:30:07.771 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:30:07 vm08 bash[102573]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T13:30:07.771 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:30:07 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7-activate[102584]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T13:30:07.771 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:30:07 vm08 bash[102573]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T13:30:08.229 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:30:07 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7-activate[102584]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-10T13:30:08.229 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:30:07 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7-activate[102584]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T13:30:08.229 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:30:07 vm08 bash[102573]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-10T13:30:08.229 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:30:07 vm08 bash[102573]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T13:30:08.229 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:30:07 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7-activate[102584]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T13:30:08.229 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:30:07 vm08 bash[102573]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T13:30:08.229 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:30:07 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7-activate[102584]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-7 2026-03-10T13:30:08.229 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:30:07 vm08 bash[102573]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-7 2026-03-10T13:30:08.229 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:30:07 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7-activate[102584]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev 
/dev/ceph-1481a211-f632-45d8-84f0-ff7eb98ea8fc/osd-block-e33bb491-3f0c-40cc-a5ae-770ea8457536 --path /var/lib/ceph/osd/ceph-7 --no-mon-config 2026-03-10T13:30:08.229 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:30:07 vm08 bash[102573]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-1481a211-f632-45d8-84f0-ff7eb98ea8fc/osd-block-e33bb491-3f0c-40cc-a5ae-770ea8457536 --path /var/lib/ceph/osd/ceph-7 --no-mon-config 2026-03-10T13:30:08.521 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:30:08 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7-activate[102584]: Running command: /usr/bin/ln -snf /dev/ceph-1481a211-f632-45d8-84f0-ff7eb98ea8fc/osd-block-e33bb491-3f0c-40cc-a5ae-770ea8457536 /var/lib/ceph/osd/ceph-7/block 2026-03-10T13:30:08.521 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:30:08 vm08 bash[102573]: Running command: /usr/bin/ln -snf /dev/ceph-1481a211-f632-45d8-84f0-ff7eb98ea8fc/osd-block-e33bb491-3f0c-40cc-a5ae-770ea8457536 /var/lib/ceph/osd/ceph-7/block 2026-03-10T13:30:08.522 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:30:08 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7-activate[102584]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-7/block 2026-03-10T13:30:08.522 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:30:08 vm08 bash[102573]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-7/block 2026-03-10T13:30:08.522 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:30:08 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7-activate[102584]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-3 2026-03-10T13:30:08.522 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:30:08 vm08 bash[102573]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-3 2026-03-10T13:30:08.522 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:30:08 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7-activate[102584]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-7 2026-03-10T13:30:08.522 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:30:08 vm08 bash[102573]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-7 2026-03-10T13:30:08.522 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:30:08 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7-activate[102584]: --> ceph-volume lvm activate successful for osd ID: 7 2026-03-10T13:30:08.522 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:30:08 vm08 bash[102573]: --> ceph-volume lvm activate successful for osd ID: 7 2026-03-10T13:30:08.522 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:30:08 vm08 podman[102573]: 2026-03-10 13:30:08.256946993 +0000 UTC m=+1.091271129 container died a7c54ce03b96ed32fe496421a71589a03b029a2cd130a4994e02cf89082a7220 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7-activate, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_REF=squid, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/) 
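The osd.7 redeploy above runs a short-lived "-deactivate" container and then an "-activate" container whose ceph-volume output (prime-osd-dir, the block symlink, the chown calls) ends with "ceph-volume lvm activate successful for osd ID: 7". If that activation step ever had to be repeated by hand on a cephadm host, a hedged sketch, reusing the OSD fsid visible in the block path above, would be:

    # Inspect the LVs ceph-volume knows about on this host
    cephadm shell -- ceph-volume lvm list
    # Re-run activation for OSD 7; --no-systemd matches containerized deployments
    # where cephadm, not the host systemd, manages the osd unit
    cephadm shell -- ceph-volume lvm activate 7 e33bb491-3f0c-40cc-a5ae-770ea8457536 --no-systemd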
2026-03-10T13:30:08.522 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:30:08 vm08 podman[102573]: 2026-03-10 13:30:08.286103412 +0000 UTC m=+1.120427548 container remove a7c54ce03b96ed32fe496421a71589a03b029a2cd130a4994e02cf89082a7220 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7-activate, ceph=True, org.label-schema.build-date=20260223, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9) 2026-03-10T13:30:08.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:08 vm08 ceph-mon[82639]: pgmap v133: 161 pgs: 10 peering, 19 stale+active+clean, 132 active+clean; 457 KiB data, 249 MiB used, 160 GiB / 160 GiB avail; 921 B/s rd, 0 op/s 2026-03-10T13:30:08.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:08 vm08 ceph-mon[82639]: Health check failed: Reduced data availability: 1 pg peering (PG_AVAILABILITY) 2026-03-10T13:30:08.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:08 vm08 ceph-mon[82639]: osdmap e131: 8 total, 7 up, 8 in 2026-03-10T13:30:08.522 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:30:08 vm08 podman[102826]: 2026-03-10 13:30:08.381600467 +0000 UTC m=+0.020227336 container create f59d4e9eed6e76b7c2de72f50a582c21028230834b9482c45e8260521c8e90a3 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0) 2026-03-10T13:30:08.522 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:30:08 vm08 podman[102826]: 2026-03-10 13:30:08.412850366 +0000 UTC m=+0.051477235 container init f59d4e9eed6e76b7c2de72f50a582c21028230834b9482c45e8260521c8e90a3 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.build-date=20260223, 
CEPH_REF=squid) 2026-03-10T13:30:08.522 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:30:08 vm08 podman[102826]: 2026-03-10 13:30:08.419202185 +0000 UTC m=+0.057829054 container start f59d4e9eed6e76b7c2de72f50a582c21028230834b9482c45e8260521c8e90a3 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_REF=squid, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-10T13:30:08.522 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:30:08 vm08 bash[102826]: f59d4e9eed6e76b7c2de72f50a582c21028230834b9482c45e8260521c8e90a3 2026-03-10T13:30:08.522 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:30:08 vm08 podman[102826]: 2026-03-10 13:30:08.373380129 +0000 UTC m=+0.012007008 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:30:08.522 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:30:08 vm08 systemd[1]: Started Ceph osd.7 for 98a3dada-1c81-11f1-89c9-d57c120f78d5. 2026-03-10T13:30:08.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:08 vm00 ceph-mon[94470]: pgmap v133: 161 pgs: 10 peering, 19 stale+active+clean, 132 active+clean; 457 KiB data, 249 MiB used, 160 GiB / 160 GiB avail; 921 B/s rd, 0 op/s 2026-03-10T13:30:08.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:08 vm00 ceph-mon[94470]: Health check failed: Reduced data availability: 1 pg peering (PG_AVAILABILITY) 2026-03-10T13:30:08.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:08 vm00 ceph-mon[94470]: osdmap e131: 8 total, 7 up, 8 in 2026-03-10T13:30:08.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:08 vm00 ceph-mon[96293]: pgmap v133: 161 pgs: 10 peering, 19 stale+active+clean, 132 active+clean; 457 KiB data, 249 MiB used, 160 GiB / 160 GiB avail; 921 B/s rd, 0 op/s 2026-03-10T13:30:08.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:08 vm00 ceph-mon[96293]: Health check failed: Reduced data availability: 1 pg peering (PG_AVAILABILITY) 2026-03-10T13:30:08.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:08 vm00 ceph-mon[96293]: osdmap e131: 8 total, 7 up, 8 in 2026-03-10T13:30:09.459 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:30:09 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7[102836]: 2026-03-10T13:30:09.244+0000 7f8ca2e09740 -1 Falling back to public interface 2026-03-10T13:30:09.459 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:09 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:09.459 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:09 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:09.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:09 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:09.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:09 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:09.752 
INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:09 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:09.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:09 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:10.673 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:30:10 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7[102836]: 2026-03-10T13:30:10.380+0000 7f8ca2e09740 -1 osd.7 0 read_superblock omap replica is missing. 2026-03-10T13:30:10.673 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:30:10 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7[102836]: 2026-03-10T13:30:10.430+0000 7f8ca2e09740 -1 osd.7 129 log_to_monitors true 2026-03-10T13:30:10.674 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:10 vm08 ceph-mon[82639]: pgmap v135: 161 pgs: 16 active+undersized, 10 peering, 12 stale+active+clean, 7 active+undersized+degraded, 116 active+clean; 457 KiB data, 253 MiB used, 160 GiB / 160 GiB avail; 12/627 objects degraded (1.914%) 2026-03-10T13:30:10.674 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:10 vm08 ceph-mon[82639]: Health check failed: Degraded data redundancy: 12/627 objects degraded (1.914%), 7 pgs degraded (PG_DEGRADED) 2026-03-10T13:30:10.674 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:10 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:10.674 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:10 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:10.674 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:10 vm08 ceph-mon[82639]: from='osd.7 [v2:192.168.123.108:6824/1926395972,v1:192.168.123.108:6825/1926395972]' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch 2026-03-10T13:30:10.674 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:10 vm08 ceph-mon[82639]: from='osd.7 ' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch 2026-03-10T13:30:10.674 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:10 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:10.674 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:10 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:10.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:10 vm00 ceph-mon[94470]: pgmap v135: 161 pgs: 16 active+undersized, 10 peering, 12 stale+active+clean, 7 active+undersized+degraded, 116 active+clean; 457 KiB data, 253 MiB used, 160 GiB / 160 GiB avail; 12/627 objects degraded (1.914%) 2026-03-10T13:30:10.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:10 vm00 ceph-mon[94470]: Health check failed: Degraded data redundancy: 12/627 objects degraded (1.914%), 7 pgs degraded (PG_DEGRADED) 2026-03-10T13:30:10.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:10 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:10.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:10 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:10.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:10 vm00 ceph-mon[94470]: from='osd.7 [v2:192.168.123.108:6824/1926395972,v1:192.168.123.108:6825/1926395972]' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch 2026-03-10T13:30:10.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:10 vm00 ceph-mon[94470]: from='osd.7 ' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: 
dispatch 2026-03-10T13:30:10.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:10 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:10.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:10 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:10.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:10 vm00 ceph-mon[96293]: pgmap v135: 161 pgs: 16 active+undersized, 10 peering, 12 stale+active+clean, 7 active+undersized+degraded, 116 active+clean; 457 KiB data, 253 MiB used, 160 GiB / 160 GiB avail; 12/627 objects degraded (1.914%) 2026-03-10T13:30:10.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:10 vm00 ceph-mon[96293]: Health check failed: Degraded data redundancy: 12/627 objects degraded (1.914%), 7 pgs degraded (PG_DEGRADED) 2026-03-10T13:30:10.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:10 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:10.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:10 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:10.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:10 vm00 ceph-mon[96293]: from='osd.7 [v2:192.168.123.108:6824/1926395972,v1:192.168.123.108:6825/1926395972]' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch 2026-03-10T13:30:10.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:10 vm00 ceph-mon[96293]: from='osd.7 ' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch 2026-03-10T13:30:10.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:10 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:10.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:10 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:11.396 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:30:11 vm08 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7[102836]: 2026-03-10T13:30:11.036+0000 7f8c9abb4640 -1 osd.7 129 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-10T13:30:12.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[94470]: from='osd.7 ' entity='osd.7' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]': finished 2026-03-10T13:30:12.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[94470]: from='osd.7 [v2:192.168.123.108:6824/1926395972,v1:192.168.123.108:6825/1926395972]' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm08", "root=default"]}]: dispatch 2026-03-10T13:30:12.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[94470]: osdmap e132: 8 total, 7 up, 8 in 2026-03-10T13:30:12.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[94470]: from='osd.7 ' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm08", "root=default"]}]: dispatch 2026-03-10T13:30:12.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[94470]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:30:12.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:12.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 
2026-03-10T13:30:12.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:30:12.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:30:12.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:12.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:30:12.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:12.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:12.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:12.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:12.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:12.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.0"}]: dispatch 2026-03-10T13:30:12.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.0"}]: dispatch 2026-03-10T13:30:12.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.0"}]': finished 2026-03-10T13:30:12.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.1"}]: dispatch 2026-03-10T13:30:12.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.1"}]: dispatch 2026-03-10T13:30:12.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.1"}]': finished 2026-03-10T13:30:12.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.2"}]: dispatch 2026-03-10T13:30:12.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": 
"osd.2"}]: dispatch 2026-03-10T13:30:12.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.2"}]': finished 2026-03-10T13:30:12.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.3"}]: dispatch 2026-03-10T13:30:12.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.3"}]: dispatch 2026-03-10T13:30:12.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.3"}]': finished 2026-03-10T13:30:12.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.4"}]: dispatch 2026-03-10T13:30:12.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.4"}]: dispatch 2026-03-10T13:30:12.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.4"}]': finished 2026-03-10T13:30:12.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.5"}]: dispatch 2026-03-10T13:30:12.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.5"}]: dispatch 2026-03-10T13:30:12.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.5"}]': finished 2026-03-10T13:30:12.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.6"}]: dispatch 2026-03-10T13:30:12.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.6"}]: dispatch 2026-03-10T13:30:12.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.6"}]': finished 2026-03-10T13:30:12.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.7"}]: dispatch 2026-03-10T13:30:12.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.7"}]: dispatch 2026-03-10T13:30:12.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[94470]: from='mgr.44106 ' 
entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.7"}]': finished 2026-03-10T13:30:12.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd require-osd-release", "release": "squid"}]: dispatch 2026-03-10T13:30:12.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "osd require-osd-release", "release": "squid"}]: dispatch 2026-03-10T13:30:12.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[96293]: from='osd.7 ' entity='osd.7' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]': finished 2026-03-10T13:30:12.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[96293]: from='osd.7 [v2:192.168.123.108:6824/1926395972,v1:192.168.123.108:6825/1926395972]' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm08", "root=default"]}]: dispatch 2026-03-10T13:30:12.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[96293]: osdmap e132: 8 total, 7 up, 8 in 2026-03-10T13:30:12.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[96293]: from='osd.7 ' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm08", "root=default"]}]: dispatch 2026-03-10T13:30:12.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[96293]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:30:12.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:12.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:12.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:30:12.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:30:12.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:12.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:30:12.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:12.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:12.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:12.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": 
"versions"}]: dispatch 2026-03-10T13:30:12.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:12.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.0"}]: dispatch 2026-03-10T13:30:12.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.0"}]: dispatch 2026-03-10T13:30:12.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.0"}]': finished 2026-03-10T13:30:12.504 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.1"}]: dispatch 2026-03-10T13:30:12.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.1"}]: dispatch 2026-03-10T13:30:12.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.1"}]': finished 2026-03-10T13:30:12.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.2"}]: dispatch 2026-03-10T13:30:12.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.2"}]: dispatch 2026-03-10T13:30:12.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.2"}]': finished 2026-03-10T13:30:12.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.3"}]: dispatch 2026-03-10T13:30:12.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.3"}]: dispatch 2026-03-10T13:30:12.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.3"}]': finished 2026-03-10T13:30:12.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.4"}]: dispatch 2026-03-10T13:30:12.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.4"}]: dispatch 2026-03-10T13:30:12.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.4"}]': 
finished 2026-03-10T13:30:12.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.5"}]: dispatch 2026-03-10T13:30:12.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.5"}]: dispatch 2026-03-10T13:30:12.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.5"}]': finished 2026-03-10T13:30:12.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.6"}]: dispatch 2026-03-10T13:30:12.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.6"}]: dispatch 2026-03-10T13:30:12.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.6"}]': finished 2026-03-10T13:30:12.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.7"}]: dispatch 2026-03-10T13:30:12.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.7"}]: dispatch 2026-03-10T13:30:12.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.7"}]': finished 2026-03-10T13:30:12.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd require-osd-release", "release": "squid"}]: dispatch 2026-03-10T13:30:12.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:12 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "osd require-osd-release", "release": "squid"}]: dispatch 2026-03-10T13:30:12.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:12 vm08 ceph-mon[82639]: from='osd.7 ' entity='osd.7' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]': finished 2026-03-10T13:30:12.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:12 vm08 ceph-mon[82639]: from='osd.7 [v2:192.168.123.108:6824/1926395972,v1:192.168.123.108:6825/1926395972]' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm08", "root=default"]}]: dispatch 2026-03-10T13:30:12.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:12 vm08 ceph-mon[82639]: osdmap e132: 8 total, 7 up, 8 in 2026-03-10T13:30:12.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:12 vm08 ceph-mon[82639]: from='osd.7 ' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm08", "root=default"]}]: dispatch 2026-03-10T13:30:12.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:12 vm08 ceph-mon[82639]: from='client.15249 -' 
entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:30:12.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:12 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:12.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:12 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:12.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:12 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:30:12.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:12 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:30:12.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:12 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:12.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:12 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:30:12.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:12 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:12.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:12 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:12.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:12 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:12.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:12 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:12.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:12 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:12.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:12 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.0"}]: dispatch 2026-03-10T13:30:12.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:12 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.0"}]: dispatch 2026-03-10T13:30:12.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:12 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.0"}]': finished 2026-03-10T13:30:12.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:12 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.1"}]: dispatch 2026-03-10T13:30:12.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:12 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.1"}]: dispatch 2026-03-10T13:30:12.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:12 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.1"}]': finished 2026-03-10T13:30:12.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:12 
vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.2"}]: dispatch 2026-03-10T13:30:12.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:12 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.2"}]: dispatch 2026-03-10T13:30:12.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:12 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.2"}]': finished 2026-03-10T13:30:12.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:12 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.3"}]: dispatch 2026-03-10T13:30:12.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:12 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.3"}]: dispatch 2026-03-10T13:30:12.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:12 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.3"}]': finished 2026-03-10T13:30:12.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:12 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.4"}]: dispatch 2026-03-10T13:30:12.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:12 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.4"}]: dispatch 2026-03-10T13:30:12.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:12 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.4"}]': finished 2026-03-10T13:30:12.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:12 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.5"}]: dispatch 2026-03-10T13:30:12.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:12 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.5"}]: dispatch 2026-03-10T13:30:12.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:12 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.5"}]': finished 2026-03-10T13:30:12.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:12 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.6"}]: dispatch 2026-03-10T13:30:12.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:12 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.6"}]: dispatch 2026-03-10T13:30:12.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:12 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.6"}]': finished 2026-03-10T13:30:12.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:12 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": 
"osd.7"}]: dispatch 2026-03-10T13:30:12.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:12 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.7"}]: dispatch 2026-03-10T13:30:12.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:12 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.7"}]': finished 2026-03-10T13:30:12.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:12 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd require-osd-release", "release": "squid"}]: dispatch 2026-03-10T13:30:12.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:12 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "osd require-osd-release", "release": "squid"}]: dispatch 2026-03-10T13:30:13.007 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch ps' 2026-03-10T13:30:13.223 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: pgmap v137: 161 pgs: 37 active+undersized, 10 peering, 22 active+undersized+degraded, 92 active+clean; 457 KiB data, 274 MiB used, 160 GiB / 160 GiB avail; 68/627 objects degraded (10.845%) 2026-03-10T13:30:13.223 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: Upgrade: Setting container_image for all osd 2026-03-10T13:30:13.223 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: Upgrade: Setting require_osd_release to 19 squid 2026-03-10T13:30:13.223 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-10T13:30:13.223 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: Health check cleared: OSD_UPGRADE_FINISHED (was: all OSDs are running squid or later but require_osd_release < squid) 2026-03-10T13:30:13.223 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "osd require-osd-release", "release": "squid"}]': finished 2026-03-10T13:30:13.223 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T13:30:13.223 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: osd.7 [v2:192.168.123.108:6824/1926395972,v1:192.168.123.108:6825/1926395972] boot 2026-03-10T13:30:13.223 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: osdmap e133: 8 total, 8 up, 8 in 2026-03-10T13:30:13.223 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:13.223 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: Upgrade: Setting container_image for all mds 2026-03-10T13:30:13.223 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:13.223 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' 
entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:13.223 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: Upgrade: Setting container_image for all rbd-mirror 2026-03-10T13:30:13.223 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:13.223 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:13.223 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:13.223 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: Upgrade: Setting container_image for all ceph-exporter 2026-03-10T13:30:13.223 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:13.223 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:13.223 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: Upgrade: Setting container_image for all nfs 2026-03-10T13:30:13.223 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:13.223 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:13.223 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:13.223 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:13.223 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:13.223 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:13.223 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:13.223 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:13.223 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:13.223 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]: dispatch 2026-03-10T13:30:13.223 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]: dispatch 2026-03-10T13:30:13.223 
INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]': finished 2026-03-10T13:30:13.223 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:13.223 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:13.223 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon"}]': finished 2026-03-10T13:30:13.223 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]: dispatch 2026-03-10T13:30:13.223 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]: dispatch 2026-03-10T13:30:13.223 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]': finished 2026-03-10T13:30:13.223 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd"}]: dispatch 2026-03-10T13:30:13.223 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd"}]: dispatch 2026-03-10T13:30:13.223 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd"}]': finished 2026-03-10T13:30:13.223 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mds"}]: dispatch 2026-03-10T13:30:13.223 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mds"}]: dispatch 2026-03-10T13:30:13.223 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mds"}]': finished 2026-03-10T13:30:13.224 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]: dispatch 2026-03-10T13:30:13.224 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]: dispatch 2026-03-10T13:30:13.224 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' 
cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]: dispatch 2026-03-10T13:30:13.224 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]: dispatch 2026-03-10T13:30:13.224 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]': finished 2026-03-10T13:30:13.224 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:13.224 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:13.224 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]: dispatch 2026-03-10T13:30:13.224 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]: dispatch 2026-03-10T13:30:13.224 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]': finished 2026-03-10T13:30:13.224 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.iscsi"}]: dispatch 2026-03-10T13:30:13.224 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.iscsi"}]: dispatch 2026-03-10T13:30:13.224 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]: dispatch 2026-03-10T13:30:13.224 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]: dispatch 2026-03-10T13:30:13.224 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]': finished 2026-03-10T13:30:13.224 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]: dispatch 2026-03-10T13:30:13.224 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]: dispatch 2026-03-10T13:30:13.224 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", 
"name": "container_image", "who": "client.nvmeof"}]': finished 2026-03-10T13:30:13.224 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:13.224 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:13.224 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:13.224 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:13.224 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:13.224 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:13.224 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:13.224 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:13.224 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:13.224 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:13.224 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:13.224 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:13.224 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]: dispatch 2026-03-10T13:30:13.224 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]: dispatch 2026-03-10T13:30:13.224 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]': finished 2026-03-10T13:30:13.224 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 
13:30:13 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:30:13.224 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:30:13.224 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:13.224 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:30:13.224 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:30:13.224 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:13.224 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:13.224 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:30:13.224 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:13.224 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: pgmap v137: 161 pgs: 37 active+undersized, 10 peering, 22 active+undersized+degraded, 92 active+clean; 457 KiB data, 274 MiB used, 160 GiB / 160 GiB avail; 68/627 objects degraded (10.845%) 2026-03-10T13:30:13.224 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: Upgrade: Setting container_image for all osd 2026-03-10T13:30:13.224 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: Upgrade: Setting require_osd_release to 19 squid 2026-03-10T13:30:13.224 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-10T13:30:13.224 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: Health check cleared: OSD_UPGRADE_FINISHED (was: all OSDs are running squid or later but require_osd_release < squid) 2026-03-10T13:30:13.224 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "osd require-osd-release", "release": "squid"}]': finished 2026-03-10T13:30:13.224 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T13:30:13.224 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: osd.7 [v2:192.168.123.108:6824/1926395972,v1:192.168.123.108:6825/1926395972] boot 2026-03-10T13:30:13.225 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: osdmap e133: 8 total, 8 up, 8 in 2026-03-10T13:30:13.225 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:13.225 
INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: Upgrade: Setting container_image for all mds 2026-03-10T13:30:13.225 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:13.225 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:13.225 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: Upgrade: Setting container_image for all rbd-mirror 2026-03-10T13:30:13.225 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:13.225 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:13.225 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:13.225 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: Upgrade: Setting container_image for all ceph-exporter 2026-03-10T13:30:13.225 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:13.225 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:13.225 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: Upgrade: Setting container_image for all nfs 2026-03-10T13:30:13.225 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:13.225 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:13.225 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:13.225 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:13.225 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:13.225 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:13.225 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:13.225 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:13.225 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:13.225 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 
ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]: dispatch 2026-03-10T13:30:13.225 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]: dispatch 2026-03-10T13:30:13.225 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]': finished 2026-03-10T13:30:13.225 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:13.225 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:13.225 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon"}]': finished 2026-03-10T13:30:13.225 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]: dispatch 2026-03-10T13:30:13.225 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]: dispatch 2026-03-10T13:30:13.225 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]': finished 2026-03-10T13:30:13.225 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd"}]: dispatch 2026-03-10T13:30:13.225 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd"}]: dispatch 2026-03-10T13:30:13.225 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd"}]': finished 2026-03-10T13:30:13.225 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mds"}]: dispatch 2026-03-10T13:30:13.225 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mds"}]: dispatch 2026-03-10T13:30:13.225 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mds"}]': finished 2026-03-10T13:30:13.225 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": 
"client.rgw"}]: dispatch 2026-03-10T13:30:13.225 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]: dispatch 2026-03-10T13:30:13.225 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]: dispatch 2026-03-10T13:30:13.225 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]: dispatch 2026-03-10T13:30:13.225 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]': finished 2026-03-10T13:30:13.225 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:13.225 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:13.225 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]: dispatch 2026-03-10T13:30:13.225 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]: dispatch 2026-03-10T13:30:13.225 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]': finished 2026-03-10T13:30:13.225 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.iscsi"}]: dispatch 2026-03-10T13:30:13.225 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.iscsi"}]: dispatch 2026-03-10T13:30:13.225 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]: dispatch 2026-03-10T13:30:13.225 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]: dispatch 2026-03-10T13:30:13.225 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]': finished 2026-03-10T13:30:13.225 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": 
"client.nvmeof"}]: dispatch 2026-03-10T13:30:13.225 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]: dispatch 2026-03-10T13:30:13.225 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]': finished 2026-03-10T13:30:13.225 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:13.225 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:13.225 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:13.225 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:13.225 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:13.225 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:13.225 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:13.225 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:13.225 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:13.226 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:13.226 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:13.226 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:13.226 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]: dispatch 2026-03-10T13:30:13.226 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 
ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]: dispatch 2026-03-10T13:30:13.226 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]': finished 2026-03-10T13:30:13.226 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:30:13.226 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:30:13.226 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:13.226 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:30:13.226 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:30:13.226 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:13.226 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:13.226 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:30:13.226 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:13 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:13.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: pgmap v137: 161 pgs: 37 active+undersized, 10 peering, 22 active+undersized+degraded, 92 active+clean; 457 KiB data, 274 MiB used, 160 GiB / 160 GiB avail; 68/627 objects degraded (10.845%) 2026-03-10T13:30:13.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: Upgrade: Setting container_image for all osd 2026-03-10T13:30:13.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: Upgrade: Setting require_osd_release to 19 squid 2026-03-10T13:30:13.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-10T13:30:13.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: Health check cleared: OSD_UPGRADE_FINISHED (was: all OSDs are running squid or later but require_osd_release < squid) 2026-03-10T13:30:13.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "osd require-osd-release", "release": "squid"}]': finished 2026-03-10T13:30:13.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T13:30:13.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: osd.7 
[v2:192.168.123.108:6824/1926395972,v1:192.168.123.108:6825/1926395972] boot 2026-03-10T13:30:13.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: osdmap e133: 8 total, 8 up, 8 in 2026-03-10T13:30:13.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:13.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: Upgrade: Setting container_image for all mds 2026-03-10T13:30:13.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:13.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:13.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: Upgrade: Setting container_image for all rbd-mirror 2026-03-10T13:30:13.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:13.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:13.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:13.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: Upgrade: Setting container_image for all ceph-exporter 2026-03-10T13:30:13.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:13.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:13.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: Upgrade: Setting container_image for all nfs 2026-03-10T13:30:13.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:13.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:13.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:13.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:13.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:13.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:13.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:13.522 
INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:13.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:13.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]: dispatch 2026-03-10T13:30:13.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]: dispatch 2026-03-10T13:30:13.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]': finished 2026-03-10T13:30:13.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:13.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:13.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon"}]': finished 2026-03-10T13:30:13.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]: dispatch 2026-03-10T13:30:13.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]: dispatch 2026-03-10T13:30:13.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]': finished 2026-03-10T13:30:13.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd"}]: dispatch 2026-03-10T13:30:13.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd"}]: dispatch 2026-03-10T13:30:13.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd"}]': finished 2026-03-10T13:30:13.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mds"}]: dispatch 2026-03-10T13:30:13.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mds"}]: 
dispatch 2026-03-10T13:30:13.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mds"}]': finished 2026-03-10T13:30:13.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]: dispatch 2026-03-10T13:30:13.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]: dispatch 2026-03-10T13:30:13.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]: dispatch 2026-03-10T13:30:13.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]: dispatch 2026-03-10T13:30:13.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]': finished 2026-03-10T13:30:13.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:13.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:13.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]: dispatch 2026-03-10T13:30:13.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]: dispatch 2026-03-10T13:30:13.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]': finished 2026-03-10T13:30:13.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.iscsi"}]: dispatch 2026-03-10T13:30:13.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.iscsi"}]: dispatch 2026-03-10T13:30:13.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]: dispatch 2026-03-10T13:30:13.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]: dispatch 
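The run of "config rm ... container_image" entries above, ending in "config-key del mgr/cephadm/upgrade_state", is cephadm wrapping up the upgrade phase that was in flight: once every daemon type is on the target image it drops the per-daemon-type container_image overrides it had set along the way and deletes its persisted upgrade state before logging "Upgrade: Complete!". A minimal spot-check of that cleanup from an admin shell is sketched below; `ceph config dump` and the mgr/cephadm/upgrade_state key are taken from the log itself, while `ceph config-key get` is only an assumed convenience for inspecting the key directly.

    # only a cluster-wide container_image entry is expected to remain;
    # the mon/mgr/osd/mds/client.* overrides removed above should be gone
    ceph config dump | grep container_image
    # the staggered-upgrade bookkeeping key should have been deleted
    ceph config-key get mgr/cephadm/upgrade_state || echo "upgrade_state removed"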
2026-03-10T13:30:13.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]': finished 2026-03-10T13:30:13.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]: dispatch 2026-03-10T13:30:13.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]: dispatch 2026-03-10T13:30:13.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]': finished 2026-03-10T13:30:13.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:13.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:13.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:13.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:13.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:13.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:13.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:13.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:13.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:13.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:13.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:13.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 
ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:13.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]: dispatch 2026-03-10T13:30:13.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]: dispatch 2026-03-10T13:30:13.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]': finished 2026-03-10T13:30:13.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:30:13.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:30:13.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:13.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:30:13.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:30:13.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:13.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:13.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:30:13.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:13 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:13.581 INFO:teuthology.orchestra.run.vm00.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T13:30:13.581 INFO:teuthology.orchestra.run.vm00.stdout:alertmanager.a vm00 *:9093,9094 running (16m) 89s ago 23m 26.4M - 0.25.0 c8568f914cd2 12fde3cf83cb 2026-03-10T13:30:13.581 INFO:teuthology.orchestra.run.vm00.stdout:grafana.a vm08 *:3000 running (4m) 3s ago 23m 75.1M - 10.4.0 c8b91775d855 960e32589e98 2026-03-10T13:30:13.581 INFO:teuthology.orchestra.run.vm00.stdout:iscsi.foo.vm00.dezodo vm00 running (4m) 89s ago 22m 49.4M - 3.5 e1d6a67b021e 630bf6d4e7f3 2026-03-10T13:30:13.581 INFO:teuthology.orchestra.run.vm00.stdout:mgr.x vm08 *:8443,9283,8765 running (4m) 3s ago 24m 488M - 19.2.3-678-ge911bdeb 654f31e6858e 31b91eebc856 2026-03-10T13:30:13.581 INFO:teuthology.orchestra.run.vm00.stdout:mgr.y vm00 *:8443,9283,8765 running (11m) 89s ago 25m 555M - 19.2.3-678-ge911bdeb 654f31e6858e 5bc576d4d32b 2026-03-10T13:30:13.581 INFO:teuthology.orchestra.run.vm00.stdout:mon.a vm00 running (3m) 89s ago 25m 49.3M 2048M 
19.2.3-678-ge911bdeb 654f31e6858e 981df6371890 2026-03-10T13:30:13.581 INFO:teuthology.orchestra.run.vm00.stdout:mon.b vm08 running (3m) 3s ago 25m 50.7M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 8cceb678a9ee 2026-03-10T13:30:13.581 INFO:teuthology.orchestra.run.vm00.stdout:mon.c vm00 running (3m) 89s ago 25m 40.0M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 43deda66dee3 2026-03-10T13:30:13.581 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.a vm00 *:9100 running (16m) 89s ago 23m 10.8M - 1.7.0 72c9c2088986 bcf883401619 2026-03-10T13:30:13.581 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.b vm08 *:9100 running (16m) 3s ago 23m 10.2M - 1.7.0 72c9c2088986 4ac83f03f818 2026-03-10T13:30:13.581 INFO:teuthology.orchestra.run.vm00.stdout:osd.0 vm00 running (2m) 89s ago 24m 46.9M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 5fc74f4d2179 2026-03-10T13:30:13.581 INFO:teuthology.orchestra.run.vm00.stdout:osd.1 vm00 running (2m) 89s ago 24m 68.7M 4096M 19.2.3-678-ge911bdeb 654f31e6858e dc65e199e9eb 2026-03-10T13:30:13.581 INFO:teuthology.orchestra.run.vm00.stdout:osd.2 vm00 running (2m) 89s ago 24m 66.7M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 706171e0f5c2 2026-03-10T13:30:13.581 INFO:teuthology.orchestra.run.vm00.stdout:osd.3 vm00 running (90s) 89s ago 24m 13.0M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 8739c77cf14d 2026-03-10T13:30:13.581 INFO:teuthology.orchestra.run.vm00.stdout:osd.4 vm08 running (70s) 3s ago 24m 72.8M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 5b92674798b7 2026-03-10T13:30:13.581 INFO:teuthology.orchestra.run.vm00.stdout:osd.5 vm08 running (49s) 3s ago 23m 46.9M 4096M 19.2.3-678-ge911bdeb 654f31e6858e e197b6bd6561 2026-03-10T13:30:13.581 INFO:teuthology.orchestra.run.vm00.stdout:osd.6 vm08 running (27s) 3s ago 23m 69.9M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 529ca6d92bc2 2026-03-10T13:30:13.581 INFO:teuthology.orchestra.run.vm00.stdout:osd.7 vm08 running (5s) 3s ago 23m 13.2M 4096M 19.2.3-678-ge911bdeb 654f31e6858e f59d4e9eed6e 2026-03-10T13:30:13.581 INFO:teuthology.orchestra.run.vm00.stdout:prometheus.a vm08 *:9095 running (4m) 3s ago 23m 48.5M - 2.51.0 1d3b7f56885b 3f9b2d0821c9 2026-03-10T13:30:13.581 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm00.tvlvzo vm00 *:8000 running (23m) 89s ago 23m 98.1M - 17.2.0 e1d6a67b021e be24eac16807 2026-03-10T13:30:13.581 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm08.ljayps vm08 *:8000 running (22m) 3s ago 22m 95.9M - 17.2.0 e1d6a67b021e 67b525427823 2026-03-10T13:30:13.659 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph versions | jq -e '"'"'.osd | length == 1'"'"'' 2026-03-10T13:30:14.235 INFO:teuthology.orchestra.run.vm00.stdout:true 2026-03-10T13:30:14.280 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph versions | jq -e '"'"'.osd | keys'"'"' | grep $sha1' 2026-03-10T13:30:14.453 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:14 vm00 ceph-mon[94470]: Upgrade: Setting container_image for all nvmeof 2026-03-10T13:30:14.453 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:14 vm00 ceph-mon[94470]: Upgrade: Finalizing container_image settings 
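The two cephadm-shell checks just above are the staggered test's per-type verification idiom: `ceph versions` buckets daemons by the exact version string they report, so `jq -e '.osd | length == 1'` asserts that all OSDs are on a single version, and because that version string embeds the build commit ("19.2.3-678-ge911bdeb (e911bdeb...)"), grepping the keys for $sha1 (exported into the shell via `-e sha1=...`) confirms the single version is the target build. The same pair of checks, pulled out of the cephadm wrapper only for readability:

    # all OSDs report exactly one version string
    ceph versions | jq -e '.osd | length == 1'
    # and that version is the target commit ($sha1 is provided by the test harness)
    ceph versions | jq -e '.osd | keys' | grep "$sha1"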
2026-03-10T13:30:14.453 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:14 vm00 ceph-mon[94470]: Upgrade: Complete! 2026-03-10T13:30:14.453 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:14 vm00 ceph-mon[94470]: from='client.54304 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:30:14.453 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:14 vm00 ceph-mon[94470]: osdmap e134: 8 total, 8 up, 8 in 2026-03-10T13:30:14.455 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:14 vm00 ceph-mon[96293]: Upgrade: Setting container_image for all nvmeof 2026-03-10T13:30:14.455 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:14 vm00 ceph-mon[96293]: Upgrade: Finalizing container_image settings 2026-03-10T13:30:14.455 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:14 vm00 ceph-mon[96293]: Upgrade: Complete! 2026-03-10T13:30:14.455 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:14 vm00 ceph-mon[96293]: from='client.54304 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:30:14.455 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:14 vm00 ceph-mon[96293]: osdmap e134: 8 total, 8 up, 8 in 2026-03-10T13:30:14.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:14 vm08 ceph-mon[82639]: Upgrade: Setting container_image for all nvmeof 2026-03-10T13:30:14.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:14 vm08 ceph-mon[82639]: Upgrade: Finalizing container_image settings 2026-03-10T13:30:14.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:14 vm08 ceph-mon[82639]: Upgrade: Complete! 2026-03-10T13:30:14.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:14 vm08 ceph-mon[82639]: from='client.54304 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:30:14.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:14 vm08 ceph-mon[82639]: osdmap e134: 8 total, 8 up, 8 in 2026-03-10T13:30:14.862 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)" 2026-03-10T13:30:14.924 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch upgrade status' 2026-03-10T13:30:15.115 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:15 vm00 ceph-mon[94470]: pgmap v140: 161 pgs: 42 active+undersized, 27 active+undersized+degraded, 92 active+clean; 457 KiB data, 274 MiB used, 160 GiB / 160 GiB avail; 84/627 objects degraded (13.397%) 2026-03-10T13:30:15.116 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:15 vm00 ceph-mon[94470]: from='client.54307 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:30:15.116 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:15 vm00 ceph-mon[94470]: Health check cleared: PG_AVAILABILITY (was: Reduced data availability: 1 pg peering) 2026-03-10T13:30:15.116 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:15 vm00 ceph-mon[94470]: from='client.? 192.168.123.100:0/2517349693' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:15.116 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:15 vm00 ceph-mon[94470]: from='client.? 
192.168.123.100:0/2763786448' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:15.116 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:15 vm00 ceph-mon[96293]: pgmap v140: 161 pgs: 42 active+undersized, 27 active+undersized+degraded, 92 active+clean; 457 KiB data, 274 MiB used, 160 GiB / 160 GiB avail; 84/627 objects degraded (13.397%) 2026-03-10T13:30:15.116 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:15 vm00 ceph-mon[96293]: from='client.54307 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:30:15.116 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:15 vm00 ceph-mon[96293]: Health check cleared: PG_AVAILABILITY (was: Reduced data availability: 1 pg peering) 2026-03-10T13:30:15.116 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:15 vm00 ceph-mon[96293]: from='client.? 192.168.123.100:0/2517349693' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:15.116 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:15 vm00 ceph-mon[96293]: from='client.? 192.168.123.100:0/2763786448' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:15.510 INFO:teuthology.orchestra.run.vm00.stdout:{ 2026-03-10T13:30:15.510 INFO:teuthology.orchestra.run.vm00.stdout: "target_image": null, 2026-03-10T13:30:15.510 INFO:teuthology.orchestra.run.vm00.stdout: "in_progress": false, 2026-03-10T13:30:15.510 INFO:teuthology.orchestra.run.vm00.stdout: "which": "", 2026-03-10T13:30:15.510 INFO:teuthology.orchestra.run.vm00.stdout: "services_complete": [], 2026-03-10T13:30:15.510 INFO:teuthology.orchestra.run.vm00.stdout: "progress": null, 2026-03-10T13:30:15.510 INFO:teuthology.orchestra.run.vm00.stdout: "message": "", 2026-03-10T13:30:15.510 INFO:teuthology.orchestra.run.vm00.stdout: "is_paused": false 2026-03-10T13:30:15.510 INFO:teuthology.orchestra.run.vm00.stdout:} 2026-03-10T13:30:15.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:15 vm08 ceph-mon[82639]: pgmap v140: 161 pgs: 42 active+undersized, 27 active+undersized+degraded, 92 active+clean; 457 KiB data, 274 MiB used, 160 GiB / 160 GiB avail; 84/627 objects degraded (13.397%) 2026-03-10T13:30:15.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:15 vm08 ceph-mon[82639]: from='client.54307 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:30:15.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:15 vm08 ceph-mon[82639]: Health check cleared: PG_AVAILABILITY (was: Reduced data availability: 1 pg peering) 2026-03-10T13:30:15.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:15 vm08 ceph-mon[82639]: from='client.? 192.168.123.100:0/2517349693' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:15.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:15 vm08 ceph-mon[82639]: from='client.? 
192.168.123.100:0/2763786448' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:15.578 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph health detail' 2026-03-10T13:30:16.003 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:30:15 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:30:15] "GET /metrics HTTP/1.1" 200 37744 "" "Prometheus/2.51.0" 2026-03-10T13:30:16.153 INFO:teuthology.orchestra.run.vm00.stdout:HEALTH_WARN Degraded data redundancy: 44/627 objects degraded (7.018%), 11 pgs degraded 2026-03-10T13:30:16.153 INFO:teuthology.orchestra.run.vm00.stdout:[WRN] PG_DEGRADED: Degraded data redundancy: 44/627 objects degraded (7.018%), 11 pgs degraded 2026-03-10T13:30:16.153 INFO:teuthology.orchestra.run.vm00.stdout: pg 1.0 is active+undersized+degraded, acting [0,6] 2026-03-10T13:30:16.153 INFO:teuthology.orchestra.run.vm00.stdout: pg 2.12 is active+undersized+degraded, acting [5,3] 2026-03-10T13:30:16.153 INFO:teuthology.orchestra.run.vm00.stdout: pg 2.18 is active+undersized+degraded, acting [5,4] 2026-03-10T13:30:16.154 INFO:teuthology.orchestra.run.vm00.stdout: pg 3.d is active+undersized+degraded, acting [5,6] 2026-03-10T13:30:16.154 INFO:teuthology.orchestra.run.vm00.stdout: pg 3.e is active+undersized+degraded, acting [4,1] 2026-03-10T13:30:16.154 INFO:teuthology.orchestra.run.vm00.stdout: pg 3.f is active+undersized+degraded, acting [4,0] 2026-03-10T13:30:16.154 INFO:teuthology.orchestra.run.vm00.stdout: pg 3.11 is active+undersized+degraded, acting [4,6] 2026-03-10T13:30:16.154 INFO:teuthology.orchestra.run.vm00.stdout: pg 3.13 is active+undersized+degraded, acting [4,2] 2026-03-10T13:30:16.154 INFO:teuthology.orchestra.run.vm00.stdout: pg 3.15 is active+undersized+degraded, acting [3,4] 2026-03-10T13:30:16.154 INFO:teuthology.orchestra.run.vm00.stdout: pg 3.16 is active+undersized+degraded, acting [5,1] 2026-03-10T13:30:16.154 INFO:teuthology.orchestra.run.vm00.stdout: pg 4.15 is active+undersized+degraded, acting [5,3] 2026-03-10T13:30:16.224 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --services rgw.foo' 2026-03-10T13:30:16.401 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:16 vm00 ceph-mon[94470]: pgmap v141: 161 pgs: 22 active+undersized, 11 active+undersized+degraded, 128 active+clean; 457 KiB data, 274 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s; 44/627 objects degraded (7.018%) 2026-03-10T13:30:16.401 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:16 vm00 ceph-mon[94470]: from='client.44391 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:30:16.401 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:16 vm00 ceph-mon[94470]: Health check update: Degraded data redundancy: 44/627 objects degraded (7.018%), 11 pgs degraded (PG_DEGRADED) 2026-03-10T13:30:16.401 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:16 vm00 ceph-mon[96293]: pgmap v141: 161 pgs: 22 active+undersized, 11 
active+undersized+degraded, 128 active+clean; 457 KiB data, 274 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s; 44/627 objects degraded (7.018%) 2026-03-10T13:30:16.401 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:16 vm00 ceph-mon[96293]: from='client.44391 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:30:16.401 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:16 vm00 ceph-mon[96293]: Health check update: Degraded data redundancy: 44/627 objects degraded (7.018%), 11 pgs degraded (PG_DEGRADED) 2026-03-10T13:30:16.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:16 vm08 ceph-mon[82639]: pgmap v141: 161 pgs: 22 active+undersized, 11 active+undersized+degraded, 128 active+clean; 457 KiB data, 274 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s; 44/627 objects degraded (7.018%) 2026-03-10T13:30:16.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:16 vm08 ceph-mon[82639]: from='client.44391 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:30:16.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:16 vm08 ceph-mon[82639]: Health check update: Degraded data redundancy: 44/627 objects degraded (7.018%), 11 pgs degraded (PG_DEGRADED) 2026-03-10T13:30:17.084 INFO:teuthology.orchestra.run.vm00.stdout:Initiating upgrade to quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:30:17.172 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'while ceph orch upgrade status | jq '"'"'.in_progress'"'"' | grep true && ! ceph orch upgrade status | jq '"'"'.message'"'"' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done' 2026-03-10T13:30:17.205 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:17 vm00 ceph-mon[94470]: from='client.? 
192.168.123.100:0/3226363848' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T13:30:17.205 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:17 vm00 ceph-mon[94470]: from='client.34397 -' entity='client.admin' cmd=[{"prefix": "orch upgrade start", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "services": "rgw.foo", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:30:17.205 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:17 vm00 ceph-mon[94470]: Upgrade: Started with target quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:30:17.205 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:17 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:17.205 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:17 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:30:17.205 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:17 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:30:17.205 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:17 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:17.205 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:17 vm00 ceph-mon[96293]: from='client.? 192.168.123.100:0/3226363848' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T13:30:17.205 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:17 vm00 ceph-mon[96293]: from='client.34397 -' entity='client.admin' cmd=[{"prefix": "orch upgrade start", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "services": "rgw.foo", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:30:17.205 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:17 vm00 ceph-mon[96293]: Upgrade: Started with target quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:30:17.205 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:17 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:17.205 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:17 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:30:17.205 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:17 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:30:17.205 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:17 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:17.205 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:30:17 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:30:17.006Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:30:17.205 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:30:17 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:30:17.012Z caller=notify.go:732 level=warn component=dispatcher 
receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:30:17.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:17 vm08 ceph-mon[82639]: from='client.? 192.168.123.100:0/3226363848' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T13:30:17.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:17 vm08 ceph-mon[82639]: from='client.34397 -' entity='client.admin' cmd=[{"prefix": "orch upgrade start", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "services": "rgw.foo", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:30:17.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:17 vm08 ceph-mon[82639]: Upgrade: Started with target quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:30:17.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:17 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:17.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:17 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:30:17.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:17 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:30:17.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:17 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:17.828 INFO:teuthology.orchestra.run.vm00.stdout:true 2026-03-10T13:30:18.321 INFO:teuthology.orchestra.run.vm00.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T13:30:18.321 INFO:teuthology.orchestra.run.vm00.stdout:alertmanager.a vm00 *:9093,9094 running (16m) 94s ago 23m 26.4M - 0.25.0 c8568f914cd2 12fde3cf83cb 2026-03-10T13:30:18.321 INFO:teuthology.orchestra.run.vm00.stdout:grafana.a vm08 *:3000 running (4m) 8s ago 23m 75.1M - 10.4.0 c8b91775d855 960e32589e98 2026-03-10T13:30:18.321 INFO:teuthology.orchestra.run.vm00.stdout:iscsi.foo.vm00.dezodo vm00 running (4m) 94s ago 22m 49.4M - 3.5 e1d6a67b021e 630bf6d4e7f3 2026-03-10T13:30:18.321 INFO:teuthology.orchestra.run.vm00.stdout:mgr.x vm08 *:8443,9283,8765 running (4m) 8s ago 24m 488M - 19.2.3-678-ge911bdeb 654f31e6858e 31b91eebc856 2026-03-10T13:30:18.321 INFO:teuthology.orchestra.run.vm00.stdout:mgr.y vm00 *:8443,9283,8765 running (11m) 94s ago 25m 555M - 19.2.3-678-ge911bdeb 654f31e6858e 5bc576d4d32b 2026-03-10T13:30:18.321 INFO:teuthology.orchestra.run.vm00.stdout:mon.a vm00 running (3m) 94s ago 25m 49.3M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 981df6371890 2026-03-10T13:30:18.321 INFO:teuthology.orchestra.run.vm00.stdout:mon.b vm08 running (4m) 8s ago 25m 50.7M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 8cceb678a9ee 2026-03-10T13:30:18.322 INFO:teuthology.orchestra.run.vm00.stdout:mon.c vm00 running (3m) 94s ago 25m 40.0M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 43deda66dee3 2026-03-10T13:30:18.322 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.a vm00 *:9100 running (16m) 94s ago 23m 10.8M - 1.7.0 72c9c2088986 bcf883401619 2026-03-10T13:30:18.322 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.b vm08 *:9100 running (16m) 8s ago 23m 10.2M - 1.7.0 72c9c2088986 
4ac83f03f818 2026-03-10T13:30:18.322 INFO:teuthology.orchestra.run.vm00.stdout:osd.0 vm00 running (2m) 94s ago 24m 46.9M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 5fc74f4d2179 2026-03-10T13:30:18.322 INFO:teuthology.orchestra.run.vm00.stdout:osd.1 vm00 running (2m) 94s ago 24m 68.7M 4096M 19.2.3-678-ge911bdeb 654f31e6858e dc65e199e9eb 2026-03-10T13:30:18.322 INFO:teuthology.orchestra.run.vm00.stdout:osd.2 vm00 running (2m) 94s ago 24m 66.7M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 706171e0f5c2 2026-03-10T13:30:18.322 INFO:teuthology.orchestra.run.vm00.stdout:osd.3 vm00 running (95s) 94s ago 24m 13.0M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 8739c77cf14d 2026-03-10T13:30:18.322 INFO:teuthology.orchestra.run.vm00.stdout:osd.4 vm08 running (74s) 8s ago 24m 72.8M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 5b92674798b7 2026-03-10T13:30:18.322 INFO:teuthology.orchestra.run.vm00.stdout:osd.5 vm08 running (53s) 8s ago 24m 46.9M 4096M 19.2.3-678-ge911bdeb 654f31e6858e e197b6bd6561 2026-03-10T13:30:18.322 INFO:teuthology.orchestra.run.vm00.stdout:osd.6 vm08 running (31s) 8s ago 23m 69.9M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 529ca6d92bc2 2026-03-10T13:30:18.322 INFO:teuthology.orchestra.run.vm00.stdout:osd.7 vm08 running (9s) 8s ago 23m 13.2M 4096M 19.2.3-678-ge911bdeb 654f31e6858e f59d4e9eed6e 2026-03-10T13:30:18.322 INFO:teuthology.orchestra.run.vm00.stdout:prometheus.a vm08 *:9095 running (4m) 8s ago 23m 48.5M - 2.51.0 1d3b7f56885b 3f9b2d0821c9 2026-03-10T13:30:18.322 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm00.tvlvzo vm00 *:8000 running (23m) 94s ago 23m 98.1M - 17.2.0 e1d6a67b021e be24eac16807 2026-03-10T13:30:18.322 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm08.ljayps vm08 *:8000 running (23m) 8s ago 23m 95.9M - 17.2.0 e1d6a67b021e 67b525427823 2026-03-10T13:30:18.655 INFO:teuthology.orchestra.run.vm00.stdout:{ 2026-03-10T13:30:18.655 INFO:teuthology.orchestra.run.vm00.stdout: "mon": { 2026-03-10T13:30:18.655 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 3 2026-03-10T13:30:18.655 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T13:30:18.655 INFO:teuthology.orchestra.run.vm00.stdout: "mgr": { 2026-03-10T13:30:18.655 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-10T13:30:18.655 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T13:30:18.655 INFO:teuthology.orchestra.run.vm00.stdout: "osd": { 2026-03-10T13:30:18.656 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 8 2026-03-10T13:30:18.656 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T13:30:18.656 INFO:teuthology.orchestra.run.vm00.stdout: "rgw": { 2026-03-10T13:30:18.656 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 2 2026-03-10T13:30:18.656 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T13:30:18.656 INFO:teuthology.orchestra.run.vm00.stdout: "overall": { 2026-03-10T13:30:18.656 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 2, 2026-03-10T13:30:18.656 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 13 2026-03-10T13:30:18.656 INFO:teuthology.orchestra.run.vm00.stdout: } 
2026-03-10T13:30:18.656 INFO:teuthology.orchestra.run.vm00.stdout:} 2026-03-10T13:30:18.872 INFO:teuthology.orchestra.run.vm00.stdout:{ 2026-03-10T13:30:18.873 INFO:teuthology.orchestra.run.vm00.stdout: "target_image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", 2026-03-10T13:30:18.873 INFO:teuthology.orchestra.run.vm00.stdout: "in_progress": true, 2026-03-10T13:30:18.873 INFO:teuthology.orchestra.run.vm00.stdout: "which": "Upgrading daemons in service(s) rgw.foo", 2026-03-10T13:30:18.873 INFO:teuthology.orchestra.run.vm00.stdout: "services_complete": [], 2026-03-10T13:30:18.873 INFO:teuthology.orchestra.run.vm00.stdout: "progress": "0/2 daemons upgraded", 2026-03-10T13:30:18.873 INFO:teuthology.orchestra.run.vm00.stdout: "message": "Currently upgrading rgw daemons", 2026-03-10T13:30:18.873 INFO:teuthology.orchestra.run.vm00.stdout: "is_paused": false 2026-03-10T13:30:18.873 INFO:teuthology.orchestra.run.vm00.stdout:} 2026-03-10T13:30:18.873 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:18 vm00 ceph-mon[94470]: Upgrade: First pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:30:18.873 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:18 vm00 ceph-mon[94470]: pgmap v142: 161 pgs: 161 active+clean; 457 KiB data, 279 MiB used, 160 GiB / 160 GiB avail; 809 B/s rd, 0 op/s 2026-03-10T13:30:18.873 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:18 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:18.873 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:18 vm00 ceph-mon[94470]: Upgrade: Target is version 19.2.3-678-ge911bdeb (squid) 2026-03-10T13:30:18.873 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:18 vm00 ceph-mon[94470]: Upgrade: Target container is quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, digests ['quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc'] 2026-03-10T13:30:18.873 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:18 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:30:18.873 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:18 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:18.873 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:18 vm00 ceph-mon[94470]: Upgrade: Setting container_image for all mgr 2026-03-10T13:30:18.873 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:18 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:18.873 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:18 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:18.873 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:18 vm00 ceph-mon[94470]: Upgrade: Setting container_image for all mon 2026-03-10T13:30:18.873 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:18 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:18.873 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:18 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:18.873 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:18 vm00 ceph-mon[94470]: Upgrade: Setting container_image for all crash 2026-03-10T13:30:18.873 
INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:18 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:18.873 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:18 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:18.873 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:18 vm00 ceph-mon[94470]: Upgrade: Setting container_image for all osd 2026-03-10T13:30:18.873 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:18 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:18.873 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:18 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:18.873 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:18 vm00 ceph-mon[94470]: Upgrade: Setting container_image for all mds 2026-03-10T13:30:18.873 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:18 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:18.873 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:18 vm00 ceph-mon[94470]: from='client.54334 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:30:18.873 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:18 vm00 ceph-mon[94470]: from='client.54340 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:30:18.873 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:18 vm00 ceph-mon[94470]: Upgrade: Updating rgw.foo.vm00.tvlvzo (1/2) 2026-03-10T13:30:18.873 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:18 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:18.873 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:18 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm00.tvlvzo", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T13:30:18.873 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:18 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm00.tvlvzo", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T13:30:18.873 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:18 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:30:18.873 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:18 vm00 ceph-mon[94470]: Deploying daemon rgw.foo.vm00.tvlvzo on vm00 2026-03-10T13:30:18.874 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:18 vm00 ceph-mon[94470]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 44/627 objects degraded (7.018%), 11 pgs degraded) 2026-03-10T13:30:18.874 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:18 vm00 ceph-mon[94470]: Cluster is now healthy 2026-03-10T13:30:18.874 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:18 vm00 ceph-mon[96293]: Upgrade: First pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:30:18.874 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:18 vm00 ceph-mon[96293]: pgmap v142: 161 pgs: 161 active+clean; 457 KiB data, 279 MiB used, 160 GiB / 160 GiB avail; 809 B/s rd, 0 op/s 
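The ceph versions snapshot above captures the staggered upgrade mid-flight: the three mons, both mgrs and all eight osds already report 19.2.3-678-ge911bdeb (squid), while the two rgw.foo daemons still run 17.2.0 (quincy), so the overall map lists exactly two versions. As an illustrative sketch only (these exact assertions are not part of the captured run), the same mixed-version state could be checked from a shell with jq:

  ceph versions | jq -e '.rgw | length == 1'                    # rgw daemons still share a single (old) version
  ceph versions | jq -e '.rgw | keys[0] | contains("17.2.0")'   # and that version is 17.2.0 quincy
  ceph versions | jq -e '.overall | length == 2'                # two versions cluster-wide until rgw finishes
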
2026-03-10T13:30:18.874 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:18 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:18.874 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:18 vm00 ceph-mon[96293]: Upgrade: Target is version 19.2.3-678-ge911bdeb (squid) 2026-03-10T13:30:18.874 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:18 vm00 ceph-mon[96293]: Upgrade: Target container is quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, digests ['quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc'] 2026-03-10T13:30:18.874 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:18 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:30:18.874 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:18 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:18.874 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:18 vm00 ceph-mon[96293]: Upgrade: Setting container_image for all mgr 2026-03-10T13:30:18.874 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:18 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:18.874 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:18 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:18.874 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:18 vm00 ceph-mon[96293]: Upgrade: Setting container_image for all mon 2026-03-10T13:30:18.874 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:18 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:18.874 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:18 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:18.874 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:18 vm00 ceph-mon[96293]: Upgrade: Setting container_image for all crash 2026-03-10T13:30:18.874 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:18 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:18.874 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:18 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:18.874 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:18 vm00 ceph-mon[96293]: Upgrade: Setting container_image for all osd 2026-03-10T13:30:18.874 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:18 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:18.874 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:18 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:18.874 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:18 vm00 ceph-mon[96293]: Upgrade: Setting container_image for all mds 2026-03-10T13:30:18.874 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:18 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:18.874 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:18 vm00 ceph-mon[96293]: from='client.54334 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:30:18.874 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:18 vm00 ceph-mon[96293]: 
from='client.54340 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:30:18.874 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:18 vm00 ceph-mon[96293]: Upgrade: Updating rgw.foo.vm00.tvlvzo (1/2) 2026-03-10T13:30:18.874 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:18 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:18.874 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:18 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm00.tvlvzo", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T13:30:18.874 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:18 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm00.tvlvzo", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T13:30:18.874 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:18 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:30:18.874 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:18 vm00 ceph-mon[96293]: Deploying daemon rgw.foo.vm00.tvlvzo on vm00 2026-03-10T13:30:18.874 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:18 vm00 ceph-mon[96293]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 44/627 objects degraded (7.018%), 11 pgs degraded) 2026-03-10T13:30:18.874 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:18 vm00 ceph-mon[96293]: Cluster is now healthy 2026-03-10T13:30:19.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:18 vm08 ceph-mon[82639]: Upgrade: First pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:30:19.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:18 vm08 ceph-mon[82639]: pgmap v142: 161 pgs: 161 active+clean; 457 KiB data, 279 MiB used, 160 GiB / 160 GiB avail; 809 B/s rd, 0 op/s 2026-03-10T13:30:19.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:18 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:19.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:18 vm08 ceph-mon[82639]: Upgrade: Target is version 19.2.3-678-ge911bdeb (squid) 2026-03-10T13:30:19.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:18 vm08 ceph-mon[82639]: Upgrade: Target container is quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, digests ['quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc'] 2026-03-10T13:30:19.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:18 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:30:19.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:18 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:19.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:18 vm08 ceph-mon[82639]: Upgrade: Setting container_image for all mgr 2026-03-10T13:30:19.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:18 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:19.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:18 vm08 ceph-mon[82639]: 
from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:19.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:18 vm08 ceph-mon[82639]: Upgrade: Setting container_image for all mon 2026-03-10T13:30:19.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:18 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:19.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:18 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:19.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:18 vm08 ceph-mon[82639]: Upgrade: Setting container_image for all crash 2026-03-10T13:30:19.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:18 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:19.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:18 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:19.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:18 vm08 ceph-mon[82639]: Upgrade: Setting container_image for all osd 2026-03-10T13:30:19.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:18 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:19.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:18 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:19.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:18 vm08 ceph-mon[82639]: Upgrade: Setting container_image for all mds 2026-03-10T13:30:19.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:18 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:19.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:18 vm08 ceph-mon[82639]: from='client.54334 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:30:19.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:18 vm08 ceph-mon[82639]: from='client.54340 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:30:19.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:18 vm08 ceph-mon[82639]: Upgrade: Updating rgw.foo.vm00.tvlvzo (1/2) 2026-03-10T13:30:19.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:18 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:19.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:18 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm00.tvlvzo", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T13:30:19.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:18 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm00.tvlvzo", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T13:30:19.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:18 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:30:19.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:18 vm08 ceph-mon[82639]: Deploying daemon rgw.foo.vm00.tvlvzo on vm00 
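Each monitor journals the same orchestrator-driven sequence from mgr.y: a first pull of the target image, resolution of its digest, a container_image override written for every daemon type (mgr, mon, crash, osd, mds), and then the two rgw.foo daemons being updated one at a time, starting with rgw.foo.vm00.tvlvzo (1/2). A minimal way to watch those overrides and the upgrade progress while this runs, offered only as a sketch (not among the commands executed by this job), would be:

  ceph config dump --format json | jq '[.[] | select(.name == "container_image")]'   # overrides the upgrade sets and later removes
  ceph orch upgrade status | jq '{progress, message, is_paused}'                     # fields visible in the status JSON above
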
2026-03-10T13:30:19.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:18 vm08 ceph-mon[82639]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 44/627 objects degraded (7.018%), 11 pgs degraded) 2026-03-10T13:30:19.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:18 vm08 ceph-mon[82639]: Cluster is now healthy 2026-03-10T13:30:19.653 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:19 vm08 ceph-mon[82639]: from='client.44418 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:30:19.653 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:19 vm08 ceph-mon[82639]: from='client.? 192.168.123.100:0/1326938926' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:19.653 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:19 vm08 ceph-mon[82639]: from='client.44430 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:30:19.653 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:19 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:19.653 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:19 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:19.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:19 vm00 ceph-mon[94470]: from='client.44418 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:30:19.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:19 vm00 ceph-mon[94470]: from='client.? 192.168.123.100:0/1326938926' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:19.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:19 vm00 ceph-mon[94470]: from='client.44430 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:30:19.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:19 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:19.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:19 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:19.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:19 vm00 ceph-mon[96293]: from='client.44418 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:30:19.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:19 vm00 ceph-mon[96293]: from='client.? 
192.168.123.100:0/1326938926' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:19.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:19 vm00 ceph-mon[96293]: from='client.44430 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:30:19.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:19 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:19.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:19 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:21.327 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:21 vm08 ceph-mon[82639]: pgmap v143: 161 pgs: 161 active+clean; 457 KiB data, 283 MiB used, 160 GiB / 160 GiB avail; 639 B/s rd, 0 op/s 2026-03-10T13:30:21.327 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:21 vm08 ceph-mon[82639]: Upgrade: Updating rgw.foo.vm08.ljayps (2/2) 2026-03-10T13:30:21.327 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:21 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:21.327 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:21 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm08.ljayps", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T13:30:21.327 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:21 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm08.ljayps", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T13:30:21.327 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:21 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:30:21.327 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:21 vm08 ceph-mon[82639]: Deploying daemon rgw.foo.vm08.ljayps on vm08 2026-03-10T13:30:21.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:21 vm00 ceph-mon[94470]: pgmap v143: 161 pgs: 161 active+clean; 457 KiB data, 283 MiB used, 160 GiB / 160 GiB avail; 639 B/s rd, 0 op/s 2026-03-10T13:30:21.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:21 vm00 ceph-mon[94470]: Upgrade: Updating rgw.foo.vm08.ljayps (2/2) 2026-03-10T13:30:21.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:21 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:21.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:21 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm08.ljayps", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T13:30:21.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:21 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm08.ljayps", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T13:30:21.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:21 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:30:21.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:21 vm00 ceph-mon[94470]: Deploying daemon rgw.foo.vm08.ljayps 
on vm08 2026-03-10T13:30:21.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:21 vm00 ceph-mon[96293]: pgmap v143: 161 pgs: 161 active+clean; 457 KiB data, 283 MiB used, 160 GiB / 160 GiB avail; 639 B/s rd, 0 op/s 2026-03-10T13:30:21.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:21 vm00 ceph-mon[96293]: Upgrade: Updating rgw.foo.vm08.ljayps (2/2) 2026-03-10T13:30:21.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:21 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:21.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:21 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm08.ljayps", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T13:30:21.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:21 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm08.ljayps", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T13:30:21.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:21 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:30:21.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:21 vm00 ceph-mon[96293]: Deploying daemon rgw.foo.vm08.ljayps on vm08 2026-03-10T13:30:22.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:22 vm00 ceph-mon[94470]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:30:22.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:22 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:22.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:22 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:22.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:22 vm00 ceph-mon[96293]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:30:22.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:22 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:22.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:22 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:22.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:22 vm08 ceph-mon[82639]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:30:22.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:22 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:22.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:22 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:23.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:23 vm00 ceph-mon[94470]: pgmap v144: 161 pgs: 161 active+clean; 457 KiB data, 283 MiB used, 160 GiB / 160 GiB avail; 22 KiB/s rd, 110 B/s wr, 33 op/s 2026-03-10T13:30:23.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:23 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:23.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:23 vm00 ceph-mon[96293]: pgmap v144: 161 pgs: 161 active+clean; 457 KiB data, 283 MiB used, 160 GiB / 160 GiB avail; 22 KiB/s rd, 110 B/s wr, 33 
op/s 2026-03-10T13:30:23.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:23 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:23.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:23 vm08 ceph-mon[82639]: pgmap v144: 161 pgs: 161 active+clean; 457 KiB data, 283 MiB used, 160 GiB / 160 GiB avail; 22 KiB/s rd, 110 B/s wr, 33 op/s 2026-03-10T13:30:23.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:23 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:24.076 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:24 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:24.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:24 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:24.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:24 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:24.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:24 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:24.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:24 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:24.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:24 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:24.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:24 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:24.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:24 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:24.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:24 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:24.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:24 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:24.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:24 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:24.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:24 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:24.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:24 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:24.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:24 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:24.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:24 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:24.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:24 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:24.254 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:24 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:24.448 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:24 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:24.448 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:24 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:24.448 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:24 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:24.448 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:24 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:24.448 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:24 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:24.448 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:24 vm08 ceph-mon[82639]: 
from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:24.448 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:24 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:25.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: pgmap v145: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 35 KiB/s rd, 99 B/s wr, 53 op/s 2026-03-10T13:30:25.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:25.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:25.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:25.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:25.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:30:25.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:30:25.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:25.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:30:25.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:25.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:25.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:25.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:25.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:25.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:25.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:25.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.foo.vm00.tvlvzo"}]: dispatch 2026-03-10T13:30:25.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": 
"client.rgw.foo.vm00.tvlvzo"}]: dispatch 2026-03-10T13:30:25.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.foo.vm00.tvlvzo"}]': finished 2026-03-10T13:30:25.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.foo.vm08.ljayps"}]: dispatch 2026-03-10T13:30:25.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.foo.vm08.ljayps"}]: dispatch 2026-03-10T13:30:25.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.foo.vm08.ljayps"}]': finished 2026-03-10T13:30:25.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:25.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:25.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:25.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:25.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:25.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:25.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:25.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:25.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:25.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:25.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:25.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:25.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:25.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": 
"versions"}]: dispatch 2026-03-10T13:30:25.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:25.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]: dispatch 2026-03-10T13:30:25.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]: dispatch 2026-03-10T13:30:25.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]': finished 2026-03-10T13:30:25.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:25.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:25.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon"}]': finished 2026-03-10T13:30:25.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]: dispatch 2026-03-10T13:30:25.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]: dispatch 2026-03-10T13:30:25.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]': finished 2026-03-10T13:30:25.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd"}]: dispatch 2026-03-10T13:30:25.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd"}]: dispatch 2026-03-10T13:30:25.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd"}]': finished 2026-03-10T13:30:25.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mds"}]: dispatch 2026-03-10T13:30:25.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mds"}]: dispatch 2026-03-10T13:30:25.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 
cmd='[{"prefix": "config rm", "name": "container_image", "who": "mds"}]': finished 2026-03-10T13:30:25.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]: dispatch 2026-03-10T13:30:25.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]: dispatch 2026-03-10T13:30:25.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]': finished 2026-03-10T13:30:25.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]: dispatch 2026-03-10T13:30:25.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]: dispatch 2026-03-10T13:30:25.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]': finished 2026-03-10T13:30:25.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:25.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:25.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]: dispatch 2026-03-10T13:30:25.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]: dispatch 2026-03-10T13:30:25.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]': finished 2026-03-10T13:30:25.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.iscsi"}]: dispatch 2026-03-10T13:30:25.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.iscsi"}]: dispatch 2026-03-10T13:30:25.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]: dispatch 2026-03-10T13:30:25.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 
cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]: dispatch 2026-03-10T13:30:25.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]': finished 2026-03-10T13:30:25.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]: dispatch 2026-03-10T13:30:25.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]: dispatch 2026-03-10T13:30:25.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]': finished 2026-03-10T13:30:25.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:25.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:25.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:25.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:25.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:25.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:25.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:25.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:25.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:25.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:25.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 
2026-03-10T13:30:25.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:25.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]: dispatch 2026-03-10T13:30:25.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]: dispatch 2026-03-10T13:30:25.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]': finished 2026-03-10T13:30:25.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:30:25.504 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:30:25.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: pgmap v145: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 35 KiB/s rd, 99 B/s wr, 53 op/s 2026-03-10T13:30:25.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:25.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:25.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:25.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:25.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:30:25.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:30:25.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:25.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:30:25.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:25.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:25.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:25.505 
INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:25.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:25.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:25.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:25.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.foo.vm00.tvlvzo"}]: dispatch 2026-03-10T13:30:25.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.foo.vm00.tvlvzo"}]: dispatch 2026-03-10T13:30:25.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.foo.vm00.tvlvzo"}]': finished 2026-03-10T13:30:25.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.foo.vm08.ljayps"}]: dispatch 2026-03-10T13:30:25.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.foo.vm08.ljayps"}]: dispatch 2026-03-10T13:30:25.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.foo.vm08.ljayps"}]': finished 2026-03-10T13:30:25.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:25.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:25.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:25.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:25.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:25.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:25.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:25.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' 
entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:25.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:25.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:25.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:25.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:25.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:25.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:25.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:25.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]: dispatch 2026-03-10T13:30:25.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]: dispatch 2026-03-10T13:30:25.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]': finished 2026-03-10T13:30:25.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:25.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:25.505 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon"}]': finished 2026-03-10T13:30:25.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]: dispatch 2026-03-10T13:30:25.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]: dispatch 2026-03-10T13:30:25.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]': finished 2026-03-10T13:30:25.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: 
from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd"}]: dispatch 2026-03-10T13:30:25.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd"}]: dispatch 2026-03-10T13:30:25.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd"}]': finished 2026-03-10T13:30:25.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mds"}]: dispatch 2026-03-10T13:30:25.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mds"}]: dispatch 2026-03-10T13:30:25.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mds"}]': finished 2026-03-10T13:30:25.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]: dispatch 2026-03-10T13:30:25.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]: dispatch 2026-03-10T13:30:25.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]': finished 2026-03-10T13:30:25.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]: dispatch 2026-03-10T13:30:25.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]: dispatch 2026-03-10T13:30:25.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]': finished 2026-03-10T13:30:25.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:25.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:25.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]: dispatch 2026-03-10T13:30:25.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": 
"container_image", "who": "client.ceph-exporter"}]: dispatch 2026-03-10T13:30:25.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]': finished 2026-03-10T13:30:25.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.iscsi"}]: dispatch 2026-03-10T13:30:25.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.iscsi"}]: dispatch 2026-03-10T13:30:25.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]: dispatch 2026-03-10T13:30:25.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]: dispatch 2026-03-10T13:30:25.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]': finished 2026-03-10T13:30:25.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]: dispatch 2026-03-10T13:30:25.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]: dispatch 2026-03-10T13:30:25.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]': finished 2026-03-10T13:30:25.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:25.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:25.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:25.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:25.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:25.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 
2026-03-10T13:30:25.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:25.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:25.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:25.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:25.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:25.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:25.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]: dispatch 2026-03-10T13:30:25.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]: dispatch 2026-03-10T13:30:25.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]': finished 2026-03-10T13:30:25.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:30:25.506 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:25 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:30:25.521 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: pgmap v145: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 35 KiB/s rd, 99 B/s wr, 53 op/s 2026-03-10T13:30:25.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:25.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:25.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:25.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:25.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:30:25.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 
ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:30:25.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:25.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:30:25.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:25.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:25.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:25.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:25.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:25.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:25.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:25.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.foo.vm00.tvlvzo"}]: dispatch 2026-03-10T13:30:25.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.foo.vm00.tvlvzo"}]: dispatch 2026-03-10T13:30:25.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.foo.vm00.tvlvzo"}]': finished 2026-03-10T13:30:25.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.foo.vm08.ljayps"}]: dispatch 2026-03-10T13:30:25.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.foo.vm08.ljayps"}]: dispatch 2026-03-10T13:30:25.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.foo.vm08.ljayps"}]': finished 2026-03-10T13:30:25.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:25.522 
INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:25.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:25.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:25.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:25.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:25.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:25.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:25.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:25.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:25.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:25.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:25.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:25.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:25.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:25.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]: dispatch 2026-03-10T13:30:25.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]: dispatch 2026-03-10T13:30:25.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]': finished 2026-03-10T13:30:25.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:25.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: 
from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:25.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon"}]': finished 2026-03-10T13:30:25.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]: dispatch 2026-03-10T13:30:25.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]: dispatch 2026-03-10T13:30:25.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]': finished 2026-03-10T13:30:25.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd"}]: dispatch 2026-03-10T13:30:25.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd"}]: dispatch 2026-03-10T13:30:25.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd"}]': finished 2026-03-10T13:30:25.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mds"}]: dispatch 2026-03-10T13:30:25.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mds"}]: dispatch 2026-03-10T13:30:25.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mds"}]': finished 2026-03-10T13:30:25.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]: dispatch 2026-03-10T13:30:25.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]: dispatch 2026-03-10T13:30:25.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]': finished 2026-03-10T13:30:25.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]: dispatch 2026-03-10T13:30:25.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": 
"client.rbd-mirror"}]: dispatch 2026-03-10T13:30:25.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]': finished 2026-03-10T13:30:25.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:25.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:25.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]: dispatch 2026-03-10T13:30:25.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]: dispatch 2026-03-10T13:30:25.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]': finished 2026-03-10T13:30:25.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.iscsi"}]: dispatch 2026-03-10T13:30:25.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.iscsi"}]: dispatch 2026-03-10T13:30:25.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]: dispatch 2026-03-10T13:30:25.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]: dispatch 2026-03-10T13:30:25.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]': finished 2026-03-10T13:30:25.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]: dispatch 2026-03-10T13:30:25.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]: dispatch 2026-03-10T13:30:25.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]': finished 2026-03-10T13:30:25.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": 
"mon"}]: dispatch 2026-03-10T13:30:25.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:25.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:25.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:25.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:25.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:25.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:25.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:25.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:25.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:25.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:25.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-10T13:30:25.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]: dispatch 2026-03-10T13:30:25.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]: dispatch 2026-03-10T13:30:25.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd='[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]': finished 2026-03-10T13:30:25.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:30:25.523 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:25 vm08 ceph-mon[82639]: from='mgr.44106 
192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:30:26.003 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:30:25 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:30:25] "GET /metrics HTTP/1.1" 200 37744 "" "Prometheus/2.51.0" 2026-03-10T13:30:26.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:26 vm00 ceph-mon[94470]: Upgrade: Setting container_image for all rgw 2026-03-10T13:30:26.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:26 vm00 ceph-mon[94470]: Upgrade: Setting container_image for all rbd-mirror 2026-03-10T13:30:26.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:26 vm00 ceph-mon[94470]: Upgrade: Setting container_image for all ceph-exporter 2026-03-10T13:30:26.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:26 vm00 ceph-mon[94470]: Upgrade: Setting container_image for all nfs 2026-03-10T13:30:26.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:26 vm00 ceph-mon[94470]: Upgrade: Setting container_image for all nvmeof 2026-03-10T13:30:26.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:26 vm00 ceph-mon[94470]: Upgrade: Finalizing container_image settings 2026-03-10T13:30:26.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:26 vm00 ceph-mon[94470]: Upgrade: Complete! 2026-03-10T13:30:26.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:26 vm00 ceph-mon[94470]: Checking dashboard <-> RGW credentials 2026-03-10T13:30:26.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:26 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:26.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:26 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:30:26.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:26 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:30:26.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:26 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:26.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:26 vm00 ceph-mon[96293]: Upgrade: Setting container_image for all rgw 2026-03-10T13:30:26.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:26 vm00 ceph-mon[96293]: Upgrade: Setting container_image for all rbd-mirror 2026-03-10T13:30:26.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:26 vm00 ceph-mon[96293]: Upgrade: Setting container_image for all ceph-exporter 2026-03-10T13:30:26.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:26 vm00 ceph-mon[96293]: Upgrade: Setting container_image for all nfs 2026-03-10T13:30:26.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:26 vm00 ceph-mon[96293]: Upgrade: Setting container_image for all nvmeof 2026-03-10T13:30:26.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:26 vm00 ceph-mon[96293]: Upgrade: Finalizing container_image settings 2026-03-10T13:30:26.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:26 vm00 ceph-mon[96293]: Upgrade: Complete! 
2026-03-10T13:30:26.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:26 vm00 ceph-mon[96293]: Checking dashboard <-> RGW credentials 2026-03-10T13:30:26.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:26 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:26.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:26 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:30:26.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:26 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:30:26.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:26 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:26.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:26 vm08 ceph-mon[82639]: Upgrade: Setting container_image for all rgw 2026-03-10T13:30:26.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:26 vm08 ceph-mon[82639]: Upgrade: Setting container_image for all rbd-mirror 2026-03-10T13:30:26.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:26 vm08 ceph-mon[82639]: Upgrade: Setting container_image for all ceph-exporter 2026-03-10T13:30:26.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:26 vm08 ceph-mon[82639]: Upgrade: Setting container_image for all nfs 2026-03-10T13:30:26.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:26 vm08 ceph-mon[82639]: Upgrade: Setting container_image for all nvmeof 2026-03-10T13:30:26.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:26 vm08 ceph-mon[82639]: Upgrade: Finalizing container_image settings 2026-03-10T13:30:26.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:26 vm08 ceph-mon[82639]: Upgrade: Complete! 
2026-03-10T13:30:26.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:26 vm08 ceph-mon[82639]: Checking dashboard <-> RGW credentials 2026-03-10T13:30:26.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:26 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:26.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:26 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:30:26.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:26 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:30:26.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:26 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:27.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:27 vm00 ceph-mon[94470]: pgmap v146: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 96 KiB/s rd, 170 B/s wr, 147 op/s 2026-03-10T13:30:27.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:27 vm00 ceph-mon[96293]: pgmap v146: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 96 KiB/s rd, 170 B/s wr, 147 op/s 2026-03-10T13:30:27.503 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:30:27 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:30:27.009Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:30:27.503 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:30:27 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:30:27.010Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:30:27.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:27 vm08 ceph-mon[82639]: pgmap v146: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 96 KiB/s rd, 170 B/s wr, 147 op/s 2026-03-10T13:30:28.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:28 vm00 ceph-mon[94470]: pgmap v147: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 109 KiB/s rd, 170 B/s wr, 168 op/s 2026-03-10T13:30:28.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:28 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:28.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:28 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:30:28.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:28 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:28.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:28 vm00 ceph-mon[96293]: pgmap v147: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 109 KiB/s rd, 170 B/s wr, 168 op/s 2026-03-10T13:30:28.752 
INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:28 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:28.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:28 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:30:28.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:28 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:28.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:28 vm08 ceph-mon[82639]: pgmap v147: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 109 KiB/s rd, 170 B/s wr, 168 op/s 2026-03-10T13:30:28.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:28 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:28.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:28 vm08 ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:30:28.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:28 vm08 ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:30.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:30 vm00 ceph-mon[94470]: pgmap v148: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 110 KiB/s rd, 170 B/s wr, 168 op/s 2026-03-10T13:30:30.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:30 vm00 ceph-mon[96293]: pgmap v148: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 110 KiB/s rd, 170 B/s wr, 168 op/s 2026-03-10T13:30:30.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:30 vm08 ceph-mon[82639]: pgmap v148: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 110 KiB/s rd, 170 B/s wr, 168 op/s 2026-03-10T13:30:31.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:31 vm00 ceph-mon[94470]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:30:31.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:31 vm00 ceph-mon[96293]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:30:31.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:31 vm08 ceph-mon[82639]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:30:32.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:32 vm00 ceph-mon[94470]: pgmap v149: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 111 KiB/s rd, 170 B/s wr, 170 op/s 2026-03-10T13:30:32.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:32 vm00 ceph-mon[96293]: pgmap v149: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 111 KiB/s rd, 170 B/s wr, 170 op/s 2026-03-10T13:30:32.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:32 vm08 ceph-mon[82639]: pgmap v149: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 111 KiB/s rd, 170 B/s wr, 170 op/s 2026-03-10T13:30:34.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:34 vm00 ceph-mon[94470]: pgmap v150: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 94 KiB/s rd, 85 B/s wr, 145 op/s 2026-03-10T13:30:34.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:34 vm00 
ceph-mon[96293]: pgmap v150: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 94 KiB/s rd, 85 B/s wr, 145 op/s 2026-03-10T13:30:34.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:34 vm08.local ceph-mon[82639]: pgmap v150: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 94 KiB/s rd, 85 B/s wr, 145 op/s 2026-03-10T13:30:36.003 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:30:35 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:30:35] "GET /metrics HTTP/1.1" 200 37918 "" "Prometheus/2.51.0" 2026-03-10T13:30:36.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:36 vm00 ceph-mon[94470]: pgmap v151: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 81 KiB/s rd, 85 B/s wr, 125 op/s 2026-03-10T13:30:36.752 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:36 vm00 ceph-mon[96293]: pgmap v151: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 81 KiB/s rd, 85 B/s wr, 125 op/s 2026-03-10T13:30:36.770 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:36 vm08.local ceph-mon[82639]: pgmap v151: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 81 KiB/s rd, 85 B/s wr, 125 op/s 2026-03-10T13:30:37.503 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:30:37 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:30:37.009Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:30:37.503 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:30:37 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:30:37.010Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:30:39.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:38 vm00 ceph-mon[94470]: pgmap v152: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 16 KiB/s rd, 0 B/s wr, 23 op/s 2026-03-10T13:30:39.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:38 vm00 ceph-mon[96293]: pgmap v152: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 16 KiB/s rd, 0 B/s wr, 23 op/s 2026-03-10T13:30:39.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:38 vm08.local ceph-mon[82639]: pgmap v152: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 16 KiB/s rd, 0 B/s wr, 23 op/s 2026-03-10T13:30:41.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:40 vm00 ceph-mon[94470]: pgmap v153: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 2.1 KiB/s rd, 2 op/s 2026-03-10T13:30:41.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:40 vm00 ceph-mon[96293]: pgmap v153: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 2.1 KiB/s rd, 2 op/s 2026-03-10T13:30:41.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:40 vm08.local ceph-mon[82639]: pgmap v153: 161 pgs: 161 
active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 2.1 KiB/s rd, 2 op/s 2026-03-10T13:30:42.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:41 vm00 ceph-mon[94470]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:30:42.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:41 vm00 ceph-mon[96293]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:30:42.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:41 vm08.local ceph-mon[82639]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:30:43.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:42 vm00 ceph-mon[94470]: pgmap v154: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 2.2 KiB/s rd, 2 op/s 2026-03-10T13:30:43.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:42 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:30:43.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:42 vm00 ceph-mon[96293]: pgmap v154: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 2.2 KiB/s rd, 2 op/s 2026-03-10T13:30:43.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:42 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:30:43.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:42 vm08.local ceph-mon[82639]: pgmap v154: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 2.2 KiB/s rd, 2 op/s 2026-03-10T13:30:43.021 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:42 vm08.local ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:30:45.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:44 vm00 ceph-mon[94470]: pgmap v155: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:30:45.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:44 vm00 ceph-mon[96293]: pgmap v155: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:30:45.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:44 vm08.local ceph-mon[82639]: pgmap v155: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:30:46.002 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:30:45 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:30:45] "GET /metrics HTTP/1.1" 200 37916 "" "Prometheus/2.51.0" 2026-03-10T13:30:47.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:46 vm00 ceph-mon[94470]: pgmap v156: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:30:47.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:46 vm00 ceph-mon[96293]: pgmap v156: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:30:47.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:46 vm08.local ceph-mon[82639]: pgmap v156: 161 pgs: 161 
active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:30:47.503 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:30:47 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:30:47.010Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:30:47.503 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:30:47 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:30:47.011Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:30:49.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:48 vm00 ceph-mon[94470]: pgmap v157: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:30:49.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:48 vm00 ceph-mon[96293]: pgmap v157: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:30:49.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:48 vm08.local ceph-mon[82639]: pgmap v157: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:30:49.173 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch ps' 2026-03-10T13:30:49.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:49 vm00 ceph-mon[94470]: from='client.44499 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:30:49.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:49 vm00 ceph-mon[96293]: from='client.44499 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:30:49.787 INFO:teuthology.orchestra.run.vm00.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T13:30:49.787 INFO:teuthology.orchestra.run.vm00.stdout:alertmanager.a vm00 *:9093,9094 running (17m) 26s ago 23m 26.4M - 0.25.0 c8568f914cd2 12fde3cf83cb 2026-03-10T13:30:49.787 INFO:teuthology.orchestra.run.vm00.stdout:grafana.a vm08 *:3000 running (4m) 26s ago 23m 76.5M - 10.4.0 c8b91775d855 960e32589e98 2026-03-10T13:30:49.787 INFO:teuthology.orchestra.run.vm00.stdout:iscsi.foo.vm00.dezodo vm00 running (4m) 26s ago 23m 49.7M - 3.5 e1d6a67b021e 630bf6d4e7f3 2026-03-10T13:30:49.787 INFO:teuthology.orchestra.run.vm00.stdout:mgr.x vm08 *:8443,9283,8765 running (5m) 26s ago 25m 488M - 19.2.3-678-ge911bdeb 654f31e6858e 31b91eebc856 2026-03-10T13:30:49.787 INFO:teuthology.orchestra.run.vm00.stdout:mgr.y vm00 *:8443,9283,8765 running (11m) 26s ago 26m 563M - 19.2.3-678-ge911bdeb 654f31e6858e 5bc576d4d32b 2026-03-10T13:30:49.787 INFO:teuthology.orchestra.run.vm00.stdout:mon.a vm00 running 
(3m) 26s ago 26m 56.5M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 981df6371890 2026-03-10T13:30:49.787 INFO:teuthology.orchestra.run.vm00.stdout:mon.b vm08 running (4m) 26s ago 25m 52.7M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 8cceb678a9ee 2026-03-10T13:30:49.787 INFO:teuthology.orchestra.run.vm00.stdout:mon.c vm00 running (3m) 26s ago 25m 47.1M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 43deda66dee3 2026-03-10T13:30:49.787 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.a vm00 *:9100 running (17m) 26s ago 24m 10.5M - 1.7.0 72c9c2088986 bcf883401619 2026-03-10T13:30:49.787 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.b vm08 *:9100 running (17m) 26s ago 23m 10.2M - 1.7.0 72c9c2088986 4ac83f03f818 2026-03-10T13:30:49.787 INFO:teuthology.orchestra.run.vm00.stdout:osd.0 vm00 running (3m) 26s ago 25m 53.2M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 5fc74f4d2179 2026-03-10T13:30:49.787 INFO:teuthology.orchestra.run.vm00.stdout:osd.1 vm00 running (3m) 26s ago 25m 74.4M 4096M 19.2.3-678-ge911bdeb 654f31e6858e dc65e199e9eb 2026-03-10T13:30:49.787 INFO:teuthology.orchestra.run.vm00.stdout:osd.2 vm00 running (2m) 26s ago 25m 70.1M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 706171e0f5c2 2026-03-10T13:30:49.787 INFO:teuthology.orchestra.run.vm00.stdout:osd.3 vm00 running (2m) 26s ago 24m 52.2M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 8739c77cf14d 2026-03-10T13:30:49.787 INFO:teuthology.orchestra.run.vm00.stdout:osd.4 vm08 running (106s) 26s ago 24m 75.7M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 5b92674798b7 2026-03-10T13:30:49.787 INFO:teuthology.orchestra.run.vm00.stdout:osd.5 vm08 running (85s) 26s ago 24m 48.8M 4096M 19.2.3-678-ge911bdeb 654f31e6858e e197b6bd6561 2026-03-10T13:30:49.787 INFO:teuthology.orchestra.run.vm00.stdout:osd.6 vm08 running (63s) 26s ago 24m 71.9M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 529ca6d92bc2 2026-03-10T13:30:49.787 INFO:teuthology.orchestra.run.vm00.stdout:osd.7 vm08 running (41s) 26s ago 24m 47.1M 4096M 19.2.3-678-ge911bdeb 654f31e6858e f59d4e9eed6e 2026-03-10T13:30:49.787 INFO:teuthology.orchestra.run.vm00.stdout:prometheus.a vm08 *:9095 running (5m) 26s ago 23m 48.5M - 2.51.0 1d3b7f56885b 3f9b2d0821c9 2026-03-10T13:30:49.787 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm00.tvlvzo vm00 *:8000 running (30s) 26s ago 23m 97.8M - 19.2.3-678-ge911bdeb 654f31e6858e 580ac5f891f7 2026-03-10T13:30:49.787 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm08.ljayps vm08 *:8000 running (28s) 26s ago 23m 98.6M - 19.2.3-678-ge911bdeb 654f31e6858e 9f8220442a4b 2026-03-10T13:30:49.844 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph versions | jq -e '"'"'.rgw | length == 1'"'"'' 2026-03-10T13:30:49.922 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:49 vm08.local ceph-mon[82639]: from='client.44499 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:30:50.481 INFO:teuthology.orchestra.run.vm00.stdout:true 2026-03-10T13:30:50.516 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph versions | jq -e '"'"'.rgw | keys'"'"' | grep $sha1' 
2026-03-10T13:30:50.737 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:50 vm00 ceph-mon[94470]: pgmap v158: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:30:50.737 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:50 vm00 ceph-mon[94470]: from='client.34469 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:30:50.737 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:50 vm00 ceph-mon[94470]: from='client.? 192.168.123.100:0/1125875667' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:50.737 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:50 vm00 ceph-mon[96293]: pgmap v158: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:30:50.737 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:50 vm00 ceph-mon[96293]: from='client.34469 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:30:50.737 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:50 vm00 ceph-mon[96293]: from='client.? 192.168.123.100:0/1125875667' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:51.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:50 vm08.local ceph-mon[82639]: pgmap v158: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:30:51.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:50 vm08.local ceph-mon[82639]: from='client.34469 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:30:51.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:50 vm08.local ceph-mon[82639]: from='client.? 192.168.123.100:0/1125875667' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:51.128 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)" 2026-03-10T13:30:51.205 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch upgrade status' 2026-03-10T13:30:51.918 INFO:teuthology.orchestra.run.vm00.stdout:{ 2026-03-10T13:30:51.918 INFO:teuthology.orchestra.run.vm00.stdout: "target_image": null, 2026-03-10T13:30:51.918 INFO:teuthology.orchestra.run.vm00.stdout: "in_progress": false, 2026-03-10T13:30:51.918 INFO:teuthology.orchestra.run.vm00.stdout: "which": "", 2026-03-10T13:30:51.918 INFO:teuthology.orchestra.run.vm00.stdout: "services_complete": [], 2026-03-10T13:30:51.918 INFO:teuthology.orchestra.run.vm00.stdout: "progress": null, 2026-03-10T13:30:51.918 INFO:teuthology.orchestra.run.vm00.stdout: "message": "", 2026-03-10T13:30:51.918 INFO:teuthology.orchestra.run.vm00.stdout: "is_paused": false 2026-03-10T13:30:51.918 INFO:teuthology.orchestra.run.vm00.stdout:} 2026-03-10T13:30:51.919 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:51 vm00 ceph-mon[94470]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:30:51.919 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:51 vm00 ceph-mon[94470]: from='client.? 
192.168.123.100:0/2641468050' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:51.919 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:51 vm00 ceph-mon[96293]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:30:51.919 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:51 vm00 ceph-mon[96293]: from='client.? 192.168.123.100:0/2641468050' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:52.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:51 vm08.local ceph-mon[82639]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:30:52.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:51 vm08.local ceph-mon[82639]: from='client.? 192.168.123.100:0/2641468050' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:52.093 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph health detail' 2026-03-10T13:30:52.780 INFO:teuthology.orchestra.run.vm00.stdout:HEALTH_OK 2026-03-10T13:30:52.841 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1' 2026-03-10T13:30:53.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:52 vm00 ceph-mon[94470]: pgmap v159: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:30:53.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:52 vm00 ceph-mon[94470]: from='client.54445 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:30:53.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:52 vm00 ceph-mon[96293]: pgmap v159: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:30:53.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:52 vm00 ceph-mon[96293]: from='client.54445 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:30:53.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:52 vm08.local ceph-mon[82639]: pgmap v159: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:30:53.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:52 vm08.local ceph-mon[82639]: from='client.54445 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:30:53.398 INFO:teuthology.orchestra.run.vm00.stdout:Initiating upgrade to quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:30:53.464 INFO:teuthology.run_tasks:Running task cephadm.shell... 
2026-03-10T13:30:53.467 INFO:tasks.cephadm:Running commands on role mon.a host ubuntu@vm00.local 2026-03-10T13:30:53.467 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'while ceph orch upgrade status | jq '"'"'.in_progress'"'"' | grep true && ! ceph orch upgrade status | jq '"'"'.message'"'"' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; ceph health detail ; sleep 30 ; done' 2026-03-10T13:30:53.899 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:53 vm00 ceph-mon[94470]: from='client.? 192.168.123.100:0/2738037840' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T13:30:53.899 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:53 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:53.899 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:53 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:30:53.899 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:53 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:30:53.899 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:53 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:53.900 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:53 vm00 ceph-mon[96293]: from='client.? 192.168.123.100:0/2738037840' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T13:30:53.900 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:53 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:53.900 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:53 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:30:53.900 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:53 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:30:53.900 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:53 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:54.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:53 vm08.local ceph-mon[82639]: from='client.? 
192.168.123.100:0/2738037840' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T13:30:54.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:53 vm08.local ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:54.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:53 vm08.local ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:30:54.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:53 vm08.local ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:30:54.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:53 vm08.local ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:54.136 INFO:teuthology.orchestra.run.vm00.stdout:true 2026-03-10T13:30:54.662 INFO:teuthology.orchestra.run.vm00.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T13:30:54.662 INFO:teuthology.orchestra.run.vm00.stdout:alertmanager.a vm00 *:9093,9094 running (17m) 31s ago 23m 26.4M - 0.25.0 c8568f914cd2 12fde3cf83cb 2026-03-10T13:30:54.662 INFO:teuthology.orchestra.run.vm00.stdout:grafana.a vm08 *:3000 running (4m) 31s ago 23m 76.5M - 10.4.0 c8b91775d855 960e32589e98 2026-03-10T13:30:54.662 INFO:teuthology.orchestra.run.vm00.stdout:iscsi.foo.vm00.dezodo vm00 running (5m) 31s ago 23m 49.7M - 3.5 e1d6a67b021e 630bf6d4e7f3 2026-03-10T13:30:54.662 INFO:teuthology.orchestra.run.vm00.stdout:mgr.x vm08 *:8443,9283,8765 running (5m) 31s ago 25m 488M - 19.2.3-678-ge911bdeb 654f31e6858e 31b91eebc856 2026-03-10T13:30:54.662 INFO:teuthology.orchestra.run.vm00.stdout:mgr.y vm00 *:8443,9283,8765 running (11m) 31s ago 26m 563M - 19.2.3-678-ge911bdeb 654f31e6858e 5bc576d4d32b 2026-03-10T13:30:54.662 INFO:teuthology.orchestra.run.vm00.stdout:mon.a vm00 running (4m) 31s ago 26m 56.5M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 981df6371890 2026-03-10T13:30:54.662 INFO:teuthology.orchestra.run.vm00.stdout:mon.b vm08 running (4m) 31s ago 25m 52.7M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 8cceb678a9ee 2026-03-10T13:30:54.662 INFO:teuthology.orchestra.run.vm00.stdout:mon.c vm00 running (3m) 31s ago 25m 47.1M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 43deda66dee3 2026-03-10T13:30:54.662 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.a vm00 *:9100 running (17m) 31s ago 24m 10.5M - 1.7.0 72c9c2088986 bcf883401619 2026-03-10T13:30:54.662 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.b vm08 *:9100 running (17m) 31s ago 24m 10.2M - 1.7.0 72c9c2088986 4ac83f03f818 2026-03-10T13:30:54.662 INFO:teuthology.orchestra.run.vm00.stdout:osd.0 vm00 running (3m) 31s ago 25m 53.2M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 5fc74f4d2179 2026-03-10T13:30:54.662 INFO:teuthology.orchestra.run.vm00.stdout:osd.1 vm00 running (3m) 31s ago 25m 74.4M 4096M 19.2.3-678-ge911bdeb 654f31e6858e dc65e199e9eb 2026-03-10T13:30:54.662 INFO:teuthology.orchestra.run.vm00.stdout:osd.2 vm00 running (2m) 31s ago 25m 70.1M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 706171e0f5c2 2026-03-10T13:30:54.662 INFO:teuthology.orchestra.run.vm00.stdout:osd.3 vm00 running (2m) 31s ago 24m 52.2M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 8739c77cf14d 2026-03-10T13:30:54.662 INFO:teuthology.orchestra.run.vm00.stdout:osd.4 vm08 running (111s) 31s ago 24m 75.7M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 5b92674798b7 2026-03-10T13:30:54.662 
INFO:teuthology.orchestra.run.vm00.stdout:osd.5 vm08 running (90s) 31s ago 24m 48.8M 4096M 19.2.3-678-ge911bdeb 654f31e6858e e197b6bd6561 2026-03-10T13:30:54.662 INFO:teuthology.orchestra.run.vm00.stdout:osd.6 vm08 running (68s) 31s ago 24m 71.9M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 529ca6d92bc2 2026-03-10T13:30:54.662 INFO:teuthology.orchestra.run.vm00.stdout:osd.7 vm08 running (46s) 31s ago 24m 47.1M 4096M 19.2.3-678-ge911bdeb 654f31e6858e f59d4e9eed6e 2026-03-10T13:30:54.662 INFO:teuthology.orchestra.run.vm00.stdout:prometheus.a vm08 *:9095 running (5m) 31s ago 23m 48.5M - 2.51.0 1d3b7f56885b 3f9b2d0821c9 2026-03-10T13:30:54.662 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm00.tvlvzo vm00 *:8000 running (35s) 31s ago 23m 97.8M - 19.2.3-678-ge911bdeb 654f31e6858e 580ac5f891f7 2026-03-10T13:30:54.662 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm08.ljayps vm08 *:8000 running (33s) 31s ago 23m 98.6M - 19.2.3-678-ge911bdeb 654f31e6858e 9f8220442a4b 2026-03-10T13:30:54.982 INFO:teuthology.orchestra.run.vm00.stdout:{ 2026-03-10T13:30:54.982 INFO:teuthology.orchestra.run.vm00.stdout: "mon": { 2026-03-10T13:30:54.982 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 3 2026-03-10T13:30:54.982 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T13:30:54.982 INFO:teuthology.orchestra.run.vm00.stdout: "mgr": { 2026-03-10T13:30:54.982 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-10T13:30:54.982 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T13:30:54.982 INFO:teuthology.orchestra.run.vm00.stdout: "osd": { 2026-03-10T13:30:54.982 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 8 2026-03-10T13:30:54.982 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T13:30:54.982 INFO:teuthology.orchestra.run.vm00.stdout: "rgw": { 2026-03-10T13:30:54.982 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-10T13:30:54.982 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T13:30:54.982 INFO:teuthology.orchestra.run.vm00.stdout: "overall": { 2026-03-10T13:30:54.982 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 15 2026-03-10T13:30:54.982 INFO:teuthology.orchestra.run.vm00.stdout: } 2026-03-10T13:30:54.982 INFO:teuthology.orchestra.run.vm00.stdout:} 2026-03-10T13:30:55.219 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:54 vm00 ceph-mon[94470]: pgmap v160: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:30:55.219 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:54 vm00 ceph-mon[94470]: from='client.44526 -' entity='client.admin' cmd=[{"prefix": "orch upgrade start", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:30:55.219 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:54 vm00 ceph-mon[94470]: Upgrade: Started with target quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:30:55.219 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:54 vm00 ceph-mon[94470]: Upgrade: First pull of 
quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:30:55.219 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:54 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:55.219 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:54 vm00 ceph-mon[94470]: Upgrade: Target is version 19.2.3-678-ge911bdeb (squid) 2026-03-10T13:30:55.219 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:54 vm00 ceph-mon[94470]: Upgrade: Target container is quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, digests ['quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc'] 2026-03-10T13:30:55.219 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:54 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:30:55.219 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:54 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:55.219 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:54 vm00 ceph-mon[94470]: Upgrade: Setting container_image for all mgr 2026-03-10T13:30:55.219 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:54 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:55.219 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:54 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:55.219 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:54 vm00 ceph-mon[94470]: Upgrade: Setting container_image for all mon 2026-03-10T13:30:55.219 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:54 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:55.219 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:54 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:55.219 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:54 vm00 ceph-mon[94470]: Upgrade: Setting container_image for all crash 2026-03-10T13:30:55.219 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:54 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:55.219 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:54 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:55.219 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:54 vm00 ceph-mon[94470]: Upgrade: Setting container_image for all osd 2026-03-10T13:30:55.219 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:54 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:55.219 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:54 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:55.219 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:54 vm00 ceph-mon[94470]: Upgrade: Setting container_image for all mds 2026-03-10T13:30:55.219 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:54 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:55.219 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:54 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:55.220 
INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:54 vm00 ceph-mon[94470]: Upgrade: Setting container_image for all rgw 2026-03-10T13:30:55.220 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:54 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:55.220 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:54 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:55.220 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:54 vm00 ceph-mon[94470]: Upgrade: Setting container_image for all rbd-mirror 2026-03-10T13:30:55.220 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:54 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:55.220 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:54 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:55.220 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:54 vm00 ceph-mon[94470]: Upgrade: Setting container_image for all cephfs-mirror 2026-03-10T13:30:55.220 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:54 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:55.220 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:54 vm00 ceph-mon[94470]: Upgrade: Setting container_image for all ceph-exporter 2026-03-10T13:30:55.220 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:54 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:55.220 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:54 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:55.220 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:54 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dezodo", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-10T13:30:55.220 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:54 vm00 ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dezodo", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-10T13:30:55.220 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:54 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:30:55.220 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:54 vm00 ceph-mon[96293]: pgmap v160: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:30:55.220 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:54 vm00 ceph-mon[96293]: from='client.44526 -' entity='client.admin' cmd=[{"prefix": "orch upgrade start", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:30:55.220 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:54 vm00 ceph-mon[96293]: Upgrade: Started with target quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:30:55.220 
INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:54 vm00 ceph-mon[96293]: Upgrade: First pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:30:55.220 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:54 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:55.220 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:54 vm00 ceph-mon[96293]: Upgrade: Target is version 19.2.3-678-ge911bdeb (squid) 2026-03-10T13:30:55.220 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:54 vm00 ceph-mon[96293]: Upgrade: Target container is quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, digests ['quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc'] 2026-03-10T13:30:55.220 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:54 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:30:55.220 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:54 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:55.220 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:54 vm00 ceph-mon[96293]: Upgrade: Setting container_image for all mgr 2026-03-10T13:30:55.220 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:54 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:55.220 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:54 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:55.220 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:54 vm00 ceph-mon[96293]: Upgrade: Setting container_image for all mon 2026-03-10T13:30:55.220 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:54 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:55.220 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:54 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:55.220 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:54 vm00 ceph-mon[96293]: Upgrade: Setting container_image for all crash 2026-03-10T13:30:55.220 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:54 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:55.220 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:54 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:55.220 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:54 vm00 ceph-mon[96293]: Upgrade: Setting container_image for all osd 2026-03-10T13:30:55.220 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:54 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:55.220 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:54 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:55.220 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:54 vm00 ceph-mon[96293]: Upgrade: Setting container_image for all mds 2026-03-10T13:30:55.220 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:54 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:55.220 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:54 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' 
entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:55.220 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:54 vm00 ceph-mon[96293]: Upgrade: Setting container_image for all rgw 2026-03-10T13:30:55.220 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:54 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:55.220 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:54 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:55.220 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:54 vm00 ceph-mon[96293]: Upgrade: Setting container_image for all rbd-mirror 2026-03-10T13:30:55.220 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:54 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:55.220 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:54 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:55.220 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:54 vm00 ceph-mon[96293]: Upgrade: Setting container_image for all cephfs-mirror 2026-03-10T13:30:55.220 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:54 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:55.220 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:54 vm00 ceph-mon[96293]: Upgrade: Setting container_image for all ceph-exporter 2026-03-10T13:30:55.220 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:54 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:55.220 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:54 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:55.221 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:54 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dezodo", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-10T13:30:55.221 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:54 vm00 ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dezodo", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-10T13:30:55.221 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:54 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:30:55.221 INFO:teuthology.orchestra.run.vm00.stdout:{ 2026-03-10T13:30:55.221 INFO:teuthology.orchestra.run.vm00.stdout: "target_image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", 2026-03-10T13:30:55.221 INFO:teuthology.orchestra.run.vm00.stdout: "in_progress": true, 2026-03-10T13:30:55.221 INFO:teuthology.orchestra.run.vm00.stdout: "which": "Upgrading all daemon types on all hosts", 2026-03-10T13:30:55.221 INFO:teuthology.orchestra.run.vm00.stdout: "services_complete": [ 2026-03-10T13:30:55.221 INFO:teuthology.orchestra.run.vm00.stdout: "mon", 2026-03-10T13:30:55.221 INFO:teuthology.orchestra.run.vm00.stdout: "mgr", 
2026-03-10T13:30:55.221 INFO:teuthology.orchestra.run.vm00.stdout: "rgw", 2026-03-10T13:30:55.221 INFO:teuthology.orchestra.run.vm00.stdout: "osd" 2026-03-10T13:30:55.221 INFO:teuthology.orchestra.run.vm00.stdout: ], 2026-03-10T13:30:55.221 INFO:teuthology.orchestra.run.vm00.stdout: "progress": "15/21 daemons upgraded", 2026-03-10T13:30:55.221 INFO:teuthology.orchestra.run.vm00.stdout: "message": "Currently upgrading iscsi daemons", 2026-03-10T13:30:55.221 INFO:teuthology.orchestra.run.vm00.stdout: "is_paused": false 2026-03-10T13:30:55.221 INFO:teuthology.orchestra.run.vm00.stdout:} 2026-03-10T13:30:55.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:54 vm08.local ceph-mon[82639]: pgmap v160: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:30:55.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:54 vm08.local ceph-mon[82639]: from='client.44526 -' entity='client.admin' cmd=[{"prefix": "orch upgrade start", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:30:55.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:54 vm08.local ceph-mon[82639]: Upgrade: Started with target quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:30:55.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:54 vm08.local ceph-mon[82639]: Upgrade: First pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:30:55.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:54 vm08.local ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:55.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:54 vm08.local ceph-mon[82639]: Upgrade: Target is version 19.2.3-678-ge911bdeb (squid) 2026-03-10T13:30:55.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:54 vm08.local ceph-mon[82639]: Upgrade: Target container is quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, digests ['quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc'] 2026-03-10T13:30:55.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:54 vm08.local ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T13:30:55.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:54 vm08.local ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:55.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:54 vm08.local ceph-mon[82639]: Upgrade: Setting container_image for all mgr 2026-03-10T13:30:55.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:54 vm08.local ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:55.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:54 vm08.local ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:55.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:54 vm08.local ceph-mon[82639]: Upgrade: Setting container_image for all mon 2026-03-10T13:30:55.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:54 vm08.local ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:55.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:54 vm08.local ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": 
"versions"}]: dispatch 2026-03-10T13:30:55.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:54 vm08.local ceph-mon[82639]: Upgrade: Setting container_image for all crash 2026-03-10T13:30:55.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:54 vm08.local ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:55.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:54 vm08.local ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:55.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:54 vm08.local ceph-mon[82639]: Upgrade: Setting container_image for all osd 2026-03-10T13:30:55.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:54 vm08.local ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:55.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:54 vm08.local ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:55.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:54 vm08.local ceph-mon[82639]: Upgrade: Setting container_image for all mds 2026-03-10T13:30:55.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:54 vm08.local ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:55.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:54 vm08.local ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:55.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:54 vm08.local ceph-mon[82639]: Upgrade: Setting container_image for all rgw 2026-03-10T13:30:55.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:54 vm08.local ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:55.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:54 vm08.local ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:55.271 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:54 vm08.local ceph-mon[82639]: Upgrade: Setting container_image for all rbd-mirror 2026-03-10T13:30:55.272 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:54 vm08.local ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:55.272 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:54 vm08.local ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:55.272 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:54 vm08.local ceph-mon[82639]: Upgrade: Setting container_image for all cephfs-mirror 2026-03-10T13:30:55.272 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:54 vm08.local ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:55.272 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:54 vm08.local ceph-mon[82639]: Upgrade: Setting container_image for all ceph-exporter 2026-03-10T13:30:55.272 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:54 vm08.local ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:55.272 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:54 vm08.local ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:30:55.272 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:54 vm08.local ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": 
"client.iscsi.foo.vm00.dezodo", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-10T13:30:55.272 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:54 vm08.local ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm00.dezodo", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-10T13:30:55.272 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:54 vm08.local ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:30:55.472 INFO:teuthology.orchestra.run.vm00.stdout:HEALTH_OK 2026-03-10T13:30:55.752 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:30:55 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:30:55] "GET /metrics HTTP/1.1" 200 37916 "" "Prometheus/2.51.0" 2026-03-10T13:30:56.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:55 vm00 ceph-mon[94470]: from='client.44532 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:30:56.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:55 vm00 ceph-mon[94470]: from='client.54460 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:30:56.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:55 vm00 ceph-mon[94470]: Upgrade: Updating iscsi.foo.vm00.dezodo 2026-03-10T13:30:56.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:55 vm00 ceph-mon[94470]: Deploying daemon iscsi.foo.vm00.dezodo on vm00 2026-03-10T13:30:56.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:55 vm00 ceph-mon[94470]: from='client.34496 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:30:56.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:55 vm00 ceph-mon[94470]: from='client.? 192.168.123.100:0/2852107884' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:56.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:55 vm00 ceph-mon[94470]: from='client.? 
192.168.123.100:0/3243521630' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T13:30:56.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:55 vm00 ceph-mon[96293]: from='client.44532 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:30:56.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:55 vm00 ceph-mon[96293]: from='client.54460 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:30:56.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:55 vm00 ceph-mon[96293]: Upgrade: Updating iscsi.foo.vm00.dezodo 2026-03-10T13:30:56.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:55 vm00 ceph-mon[96293]: Deploying daemon iscsi.foo.vm00.dezodo on vm00 2026-03-10T13:30:56.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:55 vm00 ceph-mon[96293]: from='client.34496 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:30:56.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:55 vm00 ceph-mon[96293]: from='client.? 192.168.123.100:0/2852107884' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:56.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:55 vm00 ceph-mon[96293]: from='client.? 192.168.123.100:0/3243521630' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T13:30:56.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:55 vm08.local ceph-mon[82639]: from='client.44532 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:30:56.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:55 vm08.local ceph-mon[82639]: from='client.54460 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:30:56.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:55 vm08.local ceph-mon[82639]: Upgrade: Updating iscsi.foo.vm00.dezodo 2026-03-10T13:30:56.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:55 vm08.local ceph-mon[82639]: Deploying daemon iscsi.foo.vm00.dezodo on vm00 2026-03-10T13:30:56.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:55 vm08.local ceph-mon[82639]: from='client.34496 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:30:56.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:55 vm08.local ceph-mon[82639]: from='client.? 192.168.123.100:0/2852107884' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:30:56.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:55 vm08.local ceph-mon[82639]: from='client.? 
192.168.123.100:0/3243521630' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T13:30:57.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:56 vm00 ceph-mon[94470]: from='client.54478 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:30:57.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:56 vm00 ceph-mon[94470]: pgmap v161: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:30:57.253 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:30:57 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:30:57.010Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:30:57.253 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:30:57 vm00 ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:30:57.011Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:30:57.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:56 vm00 ceph-mon[96293]: from='client.54478 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:30:57.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:56 vm00 ceph-mon[96293]: pgmap v161: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:30:57.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:56 vm08.local ceph-mon[82639]: from='client.54478 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:30:57.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:56 vm08.local ceph-mon[82639]: pgmap v161: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:30:58.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:57 vm00 ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:30:58.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:57 vm00 ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:30:58.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:57 vm08.local ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:30:59.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:30:59 vm08.local ceph-mon[82639]: pgmap v162: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:30:59.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:30:59 vm00 ceph-mon[94470]: pgmap v162: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 
2026-03-10T13:30:59.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:30:59 vm00 ceph-mon[96293]: pgmap v162: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:31:01.475 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:31:01 vm08.local ceph-mon[82639]: pgmap v163: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:31:01.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:31:01 vm00 ceph-mon[94470]: pgmap v163: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:31:01.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:31:01 vm00 ceph-mon[96293]: pgmap v163: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:31:02.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:31:02 vm00 ceph-mon[94470]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:31:02.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:31:02 vm00 ceph-mon[96293]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:31:02.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:31:02 vm08.local ceph-mon[82639]: from='client.15249 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:31:03.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:31:03 vm00 ceph-mon[94470]: pgmap v164: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:31:03.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:31:03 vm00 ceph-mon[96293]: pgmap v164: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:31:03.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:31:03 vm08.local ceph-mon[82639]: pgmap v164: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:31:05.349 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:31:05 vm00.local ceph-mon[94470]: pgmap v165: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:31:05.350 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:31:05 vm00.local ceph-mon[96293]: pgmap v165: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:31:05.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:31:05 vm08.local ceph-mon[82639]: pgmap v165: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:31:05.610 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:31:05 vm00.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:31:05.455+0000 7f1437cb8640 -1 log_channel(cephadm) log [ERR] : Upgrade: Paused due to UPGRADE_REDEPLOY_DAEMON: Upgrading daemon iscsi.foo.vm00.dezodo on host vm00 failed. 
2026-03-10T13:31:06.002 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:31:05 vm00.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:31:05] "GET /metrics HTTP/1.1" 200 37917 "" "Prometheus/2.51.0" 2026-03-10T13:31:06.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:31:06 vm00.local ceph-mon[94470]: pgmap v166: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:31:06.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:31:06 vm00.local ceph-mon[94470]: Upgrade: Paused due to UPGRADE_REDEPLOY_DAEMON: Upgrading daemon iscsi.foo.vm00.dezodo on host vm00 failed. 2026-03-10T13:31:06.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:31:06 vm00.local ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:31:06.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:31:06 vm00.local ceph-mon[94470]: pgmap v167: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1010 B/s rd, 0 op/s 2026-03-10T13:31:06.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:31:06 vm00.local ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:31:06.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:31:06 vm00.local ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:31:06.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:31:06 vm00.local ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:31:06.753 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:31:06 vm00.local ceph-mon[94470]: Health check failed: Upgrading daemon iscsi.foo.vm00.dezodo on host vm00 failed. (UPGRADE_REDEPLOY_DAEMON) 2026-03-10T13:31:06.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:31:06 vm00.local ceph-mon[96293]: pgmap v166: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:31:06.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:31:06 vm00.local ceph-mon[96293]: Upgrade: Paused due to UPGRADE_REDEPLOY_DAEMON: Upgrading daemon iscsi.foo.vm00.dezodo on host vm00 failed. 2026-03-10T13:31:06.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:31:06 vm00.local ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:31:06.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:31:06 vm00.local ceph-mon[96293]: pgmap v167: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1010 B/s rd, 0 op/s 2026-03-10T13:31:06.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:31:06 vm00.local ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:31:06.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:31:06 vm00.local ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:31:06.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:31:06 vm00.local ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:31:06.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:31:06 vm00.local ceph-mon[96293]: Health check failed: Upgrading daemon iscsi.foo.vm00.dezodo on host vm00 failed. 
(UPGRADE_REDEPLOY_DAEMON) 2026-03-10T13:31:06.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:31:06 vm08.local ceph-mon[82639]: pgmap v166: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:31:06.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:31:06 vm08.local ceph-mon[82639]: Upgrade: Paused due to UPGRADE_REDEPLOY_DAEMON: Upgrading daemon iscsi.foo.vm00.dezodo on host vm00 failed. 2026-03-10T13:31:06.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:31:06 vm08.local ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:31:06.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:31:06 vm08.local ceph-mon[82639]: pgmap v167: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1010 B/s rd, 0 op/s 2026-03-10T13:31:06.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:31:06 vm08.local ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:31:06.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:31:06 vm08.local ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:31:06.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:31:06 vm08.local ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:31:06.771 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:31:06 vm08.local ceph-mon[82639]: Health check failed: Upgrading daemon iscsi.foo.vm00.dezodo on host vm00 failed. (UPGRADE_REDEPLOY_DAEMON) 2026-03-10T13:31:07.503 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:31:07 vm00.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:31:07.011Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:31:07.503 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:31:07 vm00.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:31:07.012Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:31:09.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:31:08 vm00.local ceph-mon[94470]: pgmap v168: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1010 B/s rd, 0 op/s 2026-03-10T13:31:09.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:31:08 vm00.local ceph-mon[96293]: pgmap v168: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1010 B/s rd, 0 op/s 2026-03-10T13:31:09.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:31:08 vm08.local ceph-mon[82639]: pgmap v168: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1010 B/s rd, 0 op/s 2026-03-10T13:31:11.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:31:10 vm00.local ceph-mon[94470]: pgmap v169: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1010 B/s rd, 0 op/s 2026-03-10T13:31:11.002 
INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:31:10 vm00.local ceph-mon[96293]: pgmap v169: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1010 B/s rd, 0 op/s 2026-03-10T13:31:11.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:31:10 vm08.local ceph-mon[82639]: pgmap v169: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1010 B/s rd, 0 op/s 2026-03-10T13:31:13.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:31:12 vm00.local ceph-mon[94470]: pgmap v170: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 505 B/s rd, 0 op/s 2026-03-10T13:31:13.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:31:12 vm00.local ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:31:13.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:31:12 vm00.local ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:31:13.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:31:12 vm00.local ceph-mon[96293]: pgmap v170: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 505 B/s rd, 0 op/s 2026-03-10T13:31:13.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:31:12 vm00.local ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:31:13.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:31:12 vm00.local ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:31:13.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:31:12 vm08.local ceph-mon[82639]: pgmap v170: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 505 B/s rd, 0 op/s 2026-03-10T13:31:13.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:31:12 vm08.local ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:31:13.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:31:12 vm08.local ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:31:15.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:31:14 vm00.local ceph-mon[94470]: pgmap v171: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 505 B/s rd, 0 op/s 2026-03-10T13:31:15.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:31:14 vm00.local ceph-mon[96293]: pgmap v171: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 505 B/s rd, 0 op/s 2026-03-10T13:31:15.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:31:14 vm08.local ceph-mon[82639]: pgmap v171: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 505 B/s rd, 0 op/s 2026-03-10T13:31:15.884 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:31:15 vm00.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:31:15] "GET /metrics HTTP/1.1" 200 37993 "" "Prometheus/2.51.0" 2026-03-10T13:31:17.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:31:16 vm00.local ceph-mon[94470]: pgmap v172: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:31:17.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:31:16 vm00.local ceph-mon[94470]: from='client.? 
192.168.123.100:0/662740709' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-10T13:31:17.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:31:16 vm00.local ceph-mon[94470]: from='client.? 192.168.123.100:0/1756308668' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/389872585"}]: dispatch 2026-03-10T13:31:17.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:31:16 vm00.local ceph-mon[94470]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/389872585"}]: dispatch 2026-03-10T13:31:17.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:31:16 vm00.local ceph-mon[96293]: pgmap v172: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:31:17.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:31:16 vm00.local ceph-mon[96293]: from='client.? 192.168.123.100:0/662740709' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-10T13:31:17.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:31:16 vm00.local ceph-mon[96293]: from='client.? 192.168.123.100:0/1756308668' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/389872585"}]: dispatch 2026-03-10T13:31:17.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:31:16 vm00.local ceph-mon[96293]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/389872585"}]: dispatch 2026-03-10T13:31:17.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:31:16 vm08.local ceph-mon[82639]: pgmap v172: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:31:17.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:31:16 vm08.local ceph-mon[82639]: from='client.? 192.168.123.100:0/662740709' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-10T13:31:17.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:31:16 vm08.local ceph-mon[82639]: from='client.? 192.168.123.100:0/1756308668' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/389872585"}]: dispatch 2026-03-10T13:31:17.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:31:16 vm08.local ceph-mon[82639]: from='client.? 
' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/389872585"}]: dispatch 2026-03-10T13:31:17.302 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:31:17 vm00.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:31:17.012Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:31:17.302 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:31:17 vm00.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:31:17.013Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:31:18.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:31:17 vm00.local ceph-mon[94470]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/389872585"}]': finished 2026-03-10T13:31:18.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:31:17 vm00.local ceph-mon[94470]: osdmap e135: 8 total, 8 up, 8 in 2026-03-10T13:31:18.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:31:17 vm00.local ceph-mon[94470]: from='client.? 192.168.123.100:0/3694690336' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1977751189"}]: dispatch 2026-03-10T13:31:18.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:31:17 vm00.local ceph-mon[94470]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1977751189"}]: dispatch 2026-03-10T13:31:18.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:31:17 vm00.local ceph-mon[96293]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/389872585"}]': finished 2026-03-10T13:31:18.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:31:17 vm00.local ceph-mon[96293]: osdmap e135: 8 total, 8 up, 8 in 2026-03-10T13:31:18.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:31:17 vm00.local ceph-mon[96293]: from='client.? 192.168.123.100:0/3694690336' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1977751189"}]: dispatch 2026-03-10T13:31:18.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:31:17 vm00.local ceph-mon[96293]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1977751189"}]: dispatch 2026-03-10T13:31:18.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:31:17 vm08.local ceph-mon[82639]: from='client.? 
' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/389872585"}]': finished 2026-03-10T13:31:18.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:31:17 vm08.local ceph-mon[82639]: osdmap e135: 8 total, 8 up, 8 in 2026-03-10T13:31:18.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:31:17 vm08.local ceph-mon[82639]: from='client.? 192.168.123.100:0/3694690336' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1977751189"}]: dispatch 2026-03-10T13:31:18.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:31:17 vm08.local ceph-mon[82639]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1977751189"}]: dispatch 2026-03-10T13:31:19.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:31:18 vm00.local ceph-mon[94470]: pgmap v174: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:31:19.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:31:18 vm00.local ceph-mon[94470]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1977751189"}]': finished 2026-03-10T13:31:19.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:31:18 vm00.local ceph-mon[94470]: osdmap e136: 8 total, 8 up, 8 in 2026-03-10T13:31:19.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:31:18 vm00.local ceph-mon[94470]: from='client.? 192.168.123.100:0/1457095721' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/3974826919"}]: dispatch 2026-03-10T13:31:19.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:31:18 vm00.local ceph-mon[94470]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/3974826919"}]: dispatch 2026-03-10T13:31:19.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:31:18 vm00.local ceph-mon[96293]: pgmap v174: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:31:19.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:31:18 vm00.local ceph-mon[96293]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1977751189"}]': finished 2026-03-10T13:31:19.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:31:18 vm00.local ceph-mon[96293]: osdmap e136: 8 total, 8 up, 8 in 2026-03-10T13:31:19.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:31:18 vm00.local ceph-mon[96293]: from='client.? 192.168.123.100:0/1457095721' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/3974826919"}]: dispatch 2026-03-10T13:31:19.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:31:18 vm00.local ceph-mon[96293]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/3974826919"}]: dispatch 2026-03-10T13:31:19.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:31:18 vm08.local ceph-mon[82639]: pgmap v174: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:31:19.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:31:18 vm08.local ceph-mon[82639]: from='client.? 
' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/1977751189"}]': finished 2026-03-10T13:31:19.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:31:18 vm08.local ceph-mon[82639]: osdmap e136: 8 total, 8 up, 8 in 2026-03-10T13:31:19.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:31:18 vm08.local ceph-mon[82639]: from='client.? 192.168.123.100:0/1457095721' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/3974826919"}]: dispatch 2026-03-10T13:31:19.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:31:18 vm08.local ceph-mon[82639]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/3974826919"}]: dispatch 2026-03-10T13:31:19.922 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:31:19 vm08.local ceph-mon[82639]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/3974826919"}]': finished 2026-03-10T13:31:19.922 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:31:19 vm08.local ceph-mon[82639]: osdmap e137: 8 total, 8 up, 8 in 2026-03-10T13:31:19.922 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:31:19 vm08.local ceph-mon[82639]: from='client.? 192.168.123.100:0/1094365110' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/1972849199"}]: dispatch 2026-03-10T13:31:19.922 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:31:19 vm08.local ceph-mon[82639]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/1972849199"}]: dispatch 2026-03-10T13:31:20.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:31:19 vm00.local ceph-mon[94470]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/3974826919"}]': finished 2026-03-10T13:31:20.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:31:19 vm00.local ceph-mon[94470]: osdmap e137: 8 total, 8 up, 8 in 2026-03-10T13:31:20.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:31:19 vm00.local ceph-mon[94470]: from='client.? 192.168.123.100:0/1094365110' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/1972849199"}]: dispatch 2026-03-10T13:31:20.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:31:19 vm00.local ceph-mon[94470]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/1972849199"}]: dispatch 2026-03-10T13:31:20.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:31:19 vm00.local ceph-mon[96293]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/3974826919"}]': finished 2026-03-10T13:31:20.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:31:19 vm00.local ceph-mon[96293]: osdmap e137: 8 total, 8 up, 8 in 2026-03-10T13:31:20.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:31:19 vm00.local ceph-mon[96293]: from='client.? 
192.168.123.100:0/1094365110' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/1972849199"}]: dispatch 2026-03-10T13:31:20.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:31:19 vm00.local ceph-mon[96293]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/1972849199"}]: dispatch 2026-03-10T13:31:21.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:31:20 vm00.local ceph-mon[94470]: pgmap v177: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:31:21.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:31:20 vm00.local ceph-mon[94470]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/1972849199"}]': finished 2026-03-10T13:31:21.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:31:20 vm00.local ceph-mon[94470]: osdmap e138: 8 total, 8 up, 8 in 2026-03-10T13:31:21.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:31:20 vm00.local ceph-mon[94470]: from='client.? 192.168.123.100:0/3276861430' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/1972849199"}]: dispatch 2026-03-10T13:31:21.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:31:20 vm00.local ceph-mon[94470]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/1972849199"}]: dispatch 2026-03-10T13:31:21.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:31:20 vm00.local ceph-mon[96293]: pgmap v177: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:31:21.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:31:20 vm00.local ceph-mon[96293]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/1972849199"}]': finished 2026-03-10T13:31:21.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:31:20 vm00.local ceph-mon[96293]: osdmap e138: 8 total, 8 up, 8 in 2026-03-10T13:31:21.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:31:20 vm00.local ceph-mon[96293]: from='client.? 192.168.123.100:0/3276861430' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/1972849199"}]: dispatch 2026-03-10T13:31:21.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:31:20 vm00.local ceph-mon[96293]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/1972849199"}]: dispatch 2026-03-10T13:31:21.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:31:20 vm08.local ceph-mon[82639]: pgmap v177: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail 2026-03-10T13:31:21.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:31:20 vm08.local ceph-mon[82639]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6800/1972849199"}]': finished 2026-03-10T13:31:21.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:31:20 vm08.local ceph-mon[82639]: osdmap e138: 8 total, 8 up, 8 in 2026-03-10T13:31:21.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:31:20 vm08.local ceph-mon[82639]: from='client.? 
192.168.123.100:0/3276861430' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/1972849199"}]: dispatch 2026-03-10T13:31:21.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:31:20 vm08.local ceph-mon[82639]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/1972849199"}]: dispatch 2026-03-10T13:31:22.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:31:21 vm00.local ceph-mon[94470]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/1972849199"}]': finished 2026-03-10T13:31:22.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:31:21 vm00.local ceph-mon[94470]: osdmap e139: 8 total, 8 up, 8 in 2026-03-10T13:31:22.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:31:21 vm00.local ceph-mon[94470]: from='client.? 192.168.123.100:0/3061134086' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2045507698"}]: dispatch 2026-03-10T13:31:22.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:31:21 vm00.local ceph-mon[94470]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2045507698"}]: dispatch 2026-03-10T13:31:22.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:31:21 vm00.local ceph-mon[96293]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/1972849199"}]': finished 2026-03-10T13:31:22.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:31:21 vm00.local ceph-mon[96293]: osdmap e139: 8 total, 8 up, 8 in 2026-03-10T13:31:22.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:31:21 vm00.local ceph-mon[96293]: from='client.? 192.168.123.100:0/3061134086' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2045507698"}]: dispatch 2026-03-10T13:31:22.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:31:21 vm00.local ceph-mon[96293]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2045507698"}]: dispatch 2026-03-10T13:31:22.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:31:21 vm08.local ceph-mon[82639]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:6801/1972849199"}]': finished 2026-03-10T13:31:22.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:31:21 vm08.local ceph-mon[82639]: osdmap e139: 8 total, 8 up, 8 in 2026-03-10T13:31:22.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:31:21 vm08.local ceph-mon[82639]: from='client.? 192.168.123.100:0/3061134086' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2045507698"}]: dispatch 2026-03-10T13:31:22.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:31:21 vm08.local ceph-mon[82639]: from='client.? 
' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2045507698"}]: dispatch 2026-03-10T13:31:23.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:31:22 vm00.local ceph-mon[94470]: pgmap v180: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:31:23.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:31:22 vm00.local ceph-mon[94470]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2045507698"}]': finished 2026-03-10T13:31:23.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:31:22 vm00.local ceph-mon[94470]: osdmap e140: 8 total, 8 up, 8 in 2026-03-10T13:31:23.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:31:22 vm00.local ceph-mon[96293]: pgmap v180: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:31:23.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:31:22 vm00.local ceph-mon[96293]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2045507698"}]': finished 2026-03-10T13:31:23.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:31:22 vm00.local ceph-mon[96293]: osdmap e140: 8 total, 8 up, 8 in 2026-03-10T13:31:23.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:31:22 vm08.local ceph-mon[82639]: pgmap v180: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:31:23.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:31:22 vm08.local ceph-mon[82639]: from='client.? ' entity='client.iscsi.foo.vm00.dezodo' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.100:0/2045507698"}]': finished 2026-03-10T13:31:23.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:31:22 vm08.local ceph-mon[82639]: osdmap e140: 8 total, 8 up, 8 in 2026-03-10T13:31:25.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:31:24 vm00.local ceph-mon[94470]: pgmap v182: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.0 KiB/s rd, 1 op/s 2026-03-10T13:31:25.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:31:24 vm00.local ceph-mon[96293]: pgmap v182: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.0 KiB/s rd, 1 op/s 2026-03-10T13:31:25.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:31:24 vm08.local ceph-mon[82639]: pgmap v182: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.0 KiB/s rd, 1 op/s 2026-03-10T13:31:25.712 INFO:teuthology.orchestra.run.vm00.stdout:true 2026-03-10T13:31:25.934 INFO:teuthology.orchestra.run.vm00.stdout:"Error: UPGRADE_REDEPLOY_DAEMON: Upgrading daemon iscsi.foo.vm00.dezodo on host vm00 failed." 
2026-03-10T13:31:25.961 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:31:25 vm00.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:31:25] "GET /metrics HTTP/1.1" 200 37993 "" "Prometheus/2.51.0" 2026-03-10T13:31:26.044 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch ps' 2026-03-10T13:31:26.577 INFO:teuthology.orchestra.run.vm00.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T13:31:26.577 INFO:teuthology.orchestra.run.vm00.stdout:alertmanager.a vm00 *:9093,9094 running (17m) 63s ago 24m 26.4M - 0.25.0 c8568f914cd2 12fde3cf83cb 2026-03-10T13:31:26.577 INFO:teuthology.orchestra.run.vm00.stdout:grafana.a vm08 *:3000 running (5m) 63s ago 24m 76.5M - 10.4.0 c8b91775d855 960e32589e98 2026-03-10T13:31:26.577 INFO:teuthology.orchestra.run.vm00.stdout:iscsi.foo.vm00.dezodo vm00 running (5m) 63s ago 24m 49.7M - 3.5 e1d6a67b021e 630bf6d4e7f3 2026-03-10T13:31:26.577 INFO:teuthology.orchestra.run.vm00.stdout:mgr.x vm08 *:8443,9283,8765 running (5m) 63s ago 26m 488M - 19.2.3-678-ge911bdeb 654f31e6858e 31b91eebc856 2026-03-10T13:31:26.577 INFO:teuthology.orchestra.run.vm00.stdout:mgr.y vm00 *:8443,9283,8765 running (12m) 63s ago 26m 563M - 19.2.3-678-ge911bdeb 654f31e6858e 5bc576d4d32b 2026-03-10T13:31:26.577 INFO:teuthology.orchestra.run.vm00.stdout:mon.a vm00 running (4m) 63s ago 27m 56.5M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 981df6371890 2026-03-10T13:31:26.577 INFO:teuthology.orchestra.run.vm00.stdout:mon.b vm08 running (5m) 63s ago 26m 52.7M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 8cceb678a9ee 2026-03-10T13:31:26.577 INFO:teuthology.orchestra.run.vm00.stdout:mon.c vm00 running (4m) 63s ago 26m 47.1M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 43deda66dee3 2026-03-10T13:31:26.577 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.a vm00 *:9100 running (17m) 63s ago 24m 10.5M - 1.7.0 72c9c2088986 bcf883401619 2026-03-10T13:31:26.577 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.b vm08 *:9100 running (17m) 63s ago 24m 10.2M - 1.7.0 72c9c2088986 4ac83f03f818 2026-03-10T13:31:26.577 INFO:teuthology.orchestra.run.vm00.stdout:osd.0 vm00 running (3m) 63s ago 25m 53.2M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 5fc74f4d2179 2026-03-10T13:31:26.577 INFO:teuthology.orchestra.run.vm00.stdout:osd.1 vm00 running (3m) 63s ago 25m 74.4M 4096M 19.2.3-678-ge911bdeb 654f31e6858e dc65e199e9eb 2026-03-10T13:31:26.577 INFO:teuthology.orchestra.run.vm00.stdout:osd.2 vm00 running (3m) 63s ago 25m 70.1M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 706171e0f5c2 2026-03-10T13:31:26.577 INFO:teuthology.orchestra.run.vm00.stdout:osd.3 vm00 running (2m) 63s ago 25m 52.2M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 8739c77cf14d 2026-03-10T13:31:26.578 INFO:teuthology.orchestra.run.vm00.stdout:osd.4 vm08 running (2m) 63s ago 25m 75.7M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 5b92674798b7 2026-03-10T13:31:26.578 INFO:teuthology.orchestra.run.vm00.stdout:osd.5 vm08 running (2m) 63s ago 25m 48.8M 4096M 19.2.3-678-ge911bdeb 654f31e6858e e197b6bd6561 2026-03-10T13:31:26.578 INFO:teuthology.orchestra.run.vm00.stdout:osd.6 vm08 running (100s) 63s ago 25m 71.9M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 529ca6d92bc2 2026-03-10T13:31:26.578 INFO:teuthology.orchestra.run.vm00.stdout:osd.7 vm08 
running (78s) 63s ago 24m 47.1M 4096M 19.2.3-678-ge911bdeb 654f31e6858e f59d4e9eed6e 2026-03-10T13:31:26.578 INFO:teuthology.orchestra.run.vm00.stdout:prometheus.a vm08 *:9095 running (5m) 63s ago 24m 48.5M - 2.51.0 1d3b7f56885b 3f9b2d0821c9 2026-03-10T13:31:26.578 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm00.tvlvzo vm00 *:8000 running (67s) 63s ago 24m 97.8M - 19.2.3-678-ge911bdeb 654f31e6858e 580ac5f891f7 2026-03-10T13:31:26.578 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm08.ljayps vm08 *:8000 running (65s) 63s ago 24m 98.6M - 19.2.3-678-ge911bdeb 654f31e6858e 9f8220442a4b 2026-03-10T13:31:26.659 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph versions' 2026-03-10T13:31:26.829 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:31:26 vm00.local ceph-mon[94470]: pgmap v183: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.7 KiB/s rd, 1 op/s 2026-03-10T13:31:26.830 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:31:26 vm00.local ceph-mon[94470]: from='client.54538 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:31:26.830 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:31:26 vm00.local ceph-mon[94470]: from='client.44571 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:31:26.830 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:31:26 vm00.local ceph-mon[94470]: from='client.34547 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:31:26.830 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:31:26 vm00.local ceph-mon[96293]: pgmap v183: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.7 KiB/s rd, 1 op/s 2026-03-10T13:31:26.830 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:31:26 vm00.local ceph-mon[96293]: from='client.54538 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:31:26.830 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:31:26 vm00.local ceph-mon[96293]: from='client.44571 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:31:26.830 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:31:26 vm00.local ceph-mon[96293]: from='client.34547 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:31:27.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:31:26 vm08.local ceph-mon[82639]: pgmap v183: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.7 KiB/s rd, 1 op/s 2026-03-10T13:31:27.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:31:26 vm08.local ceph-mon[82639]: from='client.54538 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:31:27.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:31:26 vm08.local ceph-mon[82639]: from='client.44571 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:31:27.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:31:26 vm08.local ceph-mon[82639]: from='client.34547 -' 
entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:31:27.246 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:31:27 vm00.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:31:27.013Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:31:27.246 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:31:27 vm00.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:31:27.015Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:31:27.246 INFO:teuthology.orchestra.run.vm00.stdout:{ 2026-03-10T13:31:27.246 INFO:teuthology.orchestra.run.vm00.stdout: "mon": { 2026-03-10T13:31:27.246 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 3 2026-03-10T13:31:27.246 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T13:31:27.246 INFO:teuthology.orchestra.run.vm00.stdout: "mgr": { 2026-03-10T13:31:27.246 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-10T13:31:27.246 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T13:31:27.246 INFO:teuthology.orchestra.run.vm00.stdout: "osd": { 2026-03-10T13:31:27.246 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 8 2026-03-10T13:31:27.246 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T13:31:27.246 INFO:teuthology.orchestra.run.vm00.stdout: "rgw": { 2026-03-10T13:31:27.246 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-10T13:31:27.246 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T13:31:27.246 INFO:teuthology.orchestra.run.vm00.stdout: "overall": { 2026-03-10T13:31:27.246 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 15 2026-03-10T13:31:27.246 INFO:teuthology.orchestra.run.vm00.stdout: } 2026-03-10T13:31:27.246 INFO:teuthology.orchestra.run.vm00.stdout:} 2026-03-10T13:31:27.306 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'echo "wait for servicemap items w/ changing names to refresh"' 2026-03-10T13:31:27.648 INFO:teuthology.orchestra.run.vm00.stdout:wait for servicemap items w/ changing names to refresh 2026-03-10T13:31:27.687 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e 
sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'sleep 60' 2026-03-10T13:31:27.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:31:27 vm00.local ceph-mon[94470]: from='client.54544 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:31:27.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:31:27 vm00.local ceph-mon[94470]: from='client.? 192.168.123.100:0/87941352' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:31:27.752 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:31:27 vm00.local ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:31:27.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:31:27 vm00.local ceph-mon[96293]: from='client.54544 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:31:27.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:31:27 vm00.local ceph-mon[96293]: from='client.? 192.168.123.100:0/87941352' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:31:27.753 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:31:27 vm00.local ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:31:28.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:31:27 vm08.local ceph-mon[82639]: from='client.54544 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:31:28.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:31:27 vm08.local ceph-mon[82639]: from='client.? 192.168.123.100:0/87941352' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:31:28.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:31:27 vm08.local ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:31:28.982 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:31:28 vm00.local ceph-mon[96293]: pgmap v184: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.3 KiB/s rd, 1 op/s 2026-03-10T13:31:28.982 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:31:28 vm00.local ceph-mon[94470]: pgmap v184: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.3 KiB/s rd, 1 op/s 2026-03-10T13:31:29.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:31:28 vm08.local ceph-mon[82639]: pgmap v184: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.3 KiB/s rd, 1 op/s 2026-03-10T13:31:31.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:31:30 vm00.local ceph-mon[94470]: pgmap v185: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 577 B/s rd, 0 op/s 2026-03-10T13:31:31.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:31:30 vm00.local ceph-mon[96293]: pgmap v185: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 577 B/s rd, 0 op/s 2026-03-10T13:31:31.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:31:30 vm08.local ceph-mon[82639]: pgmap v185: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 577 B/s rd, 0 op/s 2026-03-10T13:31:33.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:31:32 vm00.local ceph-mon[94470]: pgmap v186: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB 
avail; 1023 B/s rd, 0 op/s 2026-03-10T13:31:33.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:31:32 vm00.local ceph-mon[96293]: pgmap v186: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-10T13:31:33.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:31:32 vm08.local ceph-mon[82639]: pgmap v186: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-10T13:31:35.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:31:34 vm00.local ceph-mon[94470]: pgmap v187: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 863 B/s rd, 0 op/s 2026-03-10T13:31:35.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:31:34 vm00.local ceph-mon[96293]: pgmap v187: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 863 B/s rd, 0 op/s 2026-03-10T13:31:35.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:31:34 vm08.local ceph-mon[82639]: pgmap v187: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 863 B/s rd, 0 op/s 2026-03-10T13:31:36.002 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:31:35 vm00.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:31:35] "GET /metrics HTTP/1.1" 200 37994 "" "Prometheus/2.51.0" 2026-03-10T13:31:37.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:31:36 vm00.local ceph-mon[94470]: pgmap v188: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:31:37.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:31:36 vm00.local ceph-mon[94470]: from='client.44571 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:31:37.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:31:36 vm00.local ceph-mon[96293]: pgmap v188: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:31:37.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:31:36 vm00.local ceph-mon[96293]: from='client.44571 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:31:37.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:31:36 vm08.local ceph-mon[82639]: pgmap v188: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:31:37.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:31:36 vm08.local ceph-mon[82639]: from='client.44571 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:31:37.502 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:31:37 vm00.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:31:37.015Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:31:37.502 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:31:37 vm00.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:31:37.016Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, 
will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:31:39.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:31:38 vm00.local ceph-mon[94470]: pgmap v189: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:31:39.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:31:38 vm00.local ceph-mon[96293]: pgmap v189: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:31:39.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:31:38 vm08.local ceph-mon[82639]: pgmap v189: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:31:41.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:31:40 vm00.local ceph-mon[94470]: pgmap v190: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:31:41.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:31:40 vm00.local ceph-mon[96293]: pgmap v190: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:31:41.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:31:40 vm08.local ceph-mon[82639]: pgmap v190: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:31:43.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:31:42 vm00.local ceph-mon[94470]: pgmap v191: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:31:43.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:31:42 vm00.local ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:31:43.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:31:42 vm00.local ceph-mon[96293]: pgmap v191: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:31:43.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:31:42 vm00.local ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:31:43.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:31:42 vm08.local ceph-mon[82639]: pgmap v191: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:31:43.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:31:42 vm08.local ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:31:45.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:31:44 vm00.local ceph-mon[94470]: pgmap v192: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:31:45.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:31:44 vm00.local ceph-mon[96293]: pgmap v192: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:31:45.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:31:44 vm08.local ceph-mon[82639]: pgmap v192: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:31:46.002 
INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:31:45 vm00.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:31:45] "GET /metrics HTTP/1.1" 200 37993 "" "Prometheus/2.51.0" 2026-03-10T13:31:47.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:31:46 vm00.local ceph-mon[94470]: pgmap v193: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:31:47.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:31:46 vm00.local ceph-mon[94470]: from='client.44571 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:31:47.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:31:46 vm00.local ceph-mon[96293]: pgmap v193: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:31:47.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:31:46 vm00.local ceph-mon[96293]: from='client.44571 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:31:47.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:31:46 vm08.local ceph-mon[82639]: pgmap v193: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:31:47.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:31:46 vm08.local ceph-mon[82639]: from='client.44571 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:31:47.503 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:31:47 vm00.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:31:47.015Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:31:47.503 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:31:47 vm00.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:31:47.016Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:31:49.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:31:48 vm00.local ceph-mon[94470]: pgmap v194: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:31:49.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:31:48 vm00.local ceph-mon[96293]: pgmap v194: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:31:49.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:31:48 vm08.local ceph-mon[82639]: pgmap v194: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:31:51.002 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:31:50 vm00.local ceph-mon[96293]: pgmap v195: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:31:51.003 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:31:50 
vm00.local ceph-mon[94470]: pgmap v195: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:31:51.020 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:31:50 vm08.local ceph-mon[82639]: pgmap v195: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:31:53.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:31:52 vm00.local ceph-mon[94470]: pgmap v196: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:31:53.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:31:52 vm00.local ceph-mon[96293]: pgmap v196: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:31:53.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:31:52 vm08.local ceph-mon[82639]: pgmap v196: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:31:55.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:31:54 vm00.local ceph-mon[94470]: pgmap v197: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:31:55.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:31:54 vm00.local ceph-mon[96293]: pgmap v197: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:31:55.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:31:54 vm08.local ceph-mon[82639]: pgmap v197: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:31:56.002 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:31:55 vm00.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:31:55] "GET /metrics HTTP/1.1" 200 37993 "" "Prometheus/2.51.0" 2026-03-10T13:31:57.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:31:56 vm00.local ceph-mon[94470]: pgmap v198: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:31:57.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:31:56 vm00.local ceph-mon[94470]: from='client.44571 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:31:57.253 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:31:57 vm00.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:31:57.016Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:31:57.253 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:31:57 vm00.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:31:57.017Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:31:57.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:31:56 vm00.local ceph-mon[96293]: pgmap v198: 161 pgs: 161 active+clean; 457 KiB data, 287 
MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:31:57.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:31:56 vm00.local ceph-mon[96293]: from='client.44571 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:31:57.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:31:56 vm08.local ceph-mon[82639]: pgmap v198: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:31:57.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:31:56 vm08.local ceph-mon[82639]: from='client.44571 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:31:58.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:31:57 vm00.local ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:31:58.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:31:57 vm00.local ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:31:58.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:31:57 vm08.local ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:31:59.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:31:58 vm00.local ceph-mon[94470]: pgmap v199: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:31:59.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:31:58 vm00.local ceph-mon[96293]: pgmap v199: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:31:59.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:31:58 vm08.local ceph-mon[82639]: pgmap v199: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:32:01.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:32:00 vm00.local ceph-mon[94470]: pgmap v200: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:32:01.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:32:00 vm00.local ceph-mon[96293]: pgmap v200: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:32:01.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:32:00 vm08.local ceph-mon[82639]: pgmap v200: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:32:03.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:32:02 vm00.local ceph-mon[94470]: pgmap v201: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:32:03.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:32:02 vm00.local ceph-mon[96293]: pgmap v201: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:32:03.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:32:02 vm08.local ceph-mon[82639]: pgmap v201: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:32:05.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:32:04 vm00.local ceph-mon[94470]: pgmap v202: 161 pgs: 161 active+clean; 457 
KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:32:05.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:32:04 vm00.local ceph-mon[96293]: pgmap v202: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:32:05.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:32:04 vm08.local ceph-mon[82639]: pgmap v202: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:32:06.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:32:05 vm00.local ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:32:06.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:32:05 vm00.local ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:32:06.002 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:32:05 vm00.local ceph-mon[94470]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:32:06.003 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:32:05 vm00.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:32:05] "GET /metrics HTTP/1.1" 200 37989 "" "Prometheus/2.51.0" 2026-03-10T13:32:06.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:32:05 vm00.local ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:32:06.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:32:05 vm00.local ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:32:06.003 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:32:05 vm00.local ceph-mon[96293]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:32:06.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:32:05 vm08.local ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T13:32:06.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:32:05 vm08.local ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T13:32:06.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:32:05 vm08.local ceph-mon[82639]: from='mgr.44106 ' entity='mgr.y' 2026-03-10T13:32:07.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:32:06 vm00.local ceph-mon[94470]: pgmap v203: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:32:07.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:32:06 vm00.local ceph-mon[94470]: from='client.44571 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:32:07.253 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:32:07 vm00.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:32:07.017Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:32:07.253 
INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:32:07 vm00.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:32:07.018Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:32:07.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:32:06 vm00.local ceph-mon[96293]: pgmap v203: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:32:07.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:32:06 vm00.local ceph-mon[96293]: from='client.44571 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:32:07.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:32:06 vm08.local ceph-mon[82639]: pgmap v203: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:32:07.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:32:06 vm08.local ceph-mon[82639]: from='client.44571 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:32:09.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:32:08 vm00.local ceph-mon[94470]: pgmap v204: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:32:09.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:32:08 vm00.local ceph-mon[96293]: pgmap v204: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:32:09.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:32:08 vm08.local ceph-mon[82639]: pgmap v204: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:32:11.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:32:10 vm00.local ceph-mon[96293]: pgmap v205: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:32:11.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:32:10 vm00.local ceph-mon[94470]: pgmap v205: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:32:11.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:32:10 vm08.local ceph-mon[82639]: pgmap v205: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:32:13.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:32:12 vm00.local ceph-mon[94470]: pgmap v206: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:32:13.253 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:32:12 vm00.local ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:32:13.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:32:12 vm00.local ceph-mon[96293]: pgmap v206: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:32:13.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:32:12 vm00.local ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd 
blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:32:13.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:32:12 vm08.local ceph-mon[82639]: pgmap v206: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:32:13.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:32:12 vm08.local ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:32:15.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:32:14 vm00.local ceph-mon[94470]: pgmap v207: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:32:15.252 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:32:14 vm00.local ceph-mon[96293]: pgmap v207: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:32:15.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:32:14 vm08.local ceph-mon[82639]: pgmap v207: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:32:16.002 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:32:15 vm00.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:32:15] "GET /metrics HTTP/1.1" 200 37990 "" "Prometheus/2.51.0" 2026-03-10T13:32:17.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:32:16 vm00.local ceph-mon[94470]: pgmap v208: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:32:17.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:32:16 vm00.local ceph-mon[94470]: from='client.44571 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:32:17.253 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:32:17 vm00.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:32:17.018Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:32:17.253 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:32:17 vm00.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:32:17.019Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:32:17.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:32:16 vm00.local ceph-mon[96293]: pgmap v208: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:32:17.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:32:16 vm00.local ceph-mon[96293]: from='client.44571 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:32:17.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:32:16 vm08.local ceph-mon[82639]: pgmap v208: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 
2026-03-10T13:32:17.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:32:16 vm08.local ceph-mon[82639]: from='client.44571 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:32:19.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:32:18 vm00.local ceph-mon[94470]: pgmap v209: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:32:19.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:32:18 vm00.local ceph-mon[96293]: pgmap v209: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:32:19.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:32:18 vm08.local ceph-mon[82639]: pgmap v209: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:32:21.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:32:20 vm00.local ceph-mon[94470]: pgmap v210: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:32:21.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:32:20 vm00.local ceph-mon[96293]: pgmap v210: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:32:21.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:32:20 vm08.local ceph-mon[82639]: pgmap v210: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:32:23.252 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:32:22 vm00.local ceph-mon[94470]: pgmap v211: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:32:23.253 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:32:22 vm00.local ceph-mon[96293]: pgmap v211: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:32:23.270 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:32:22 vm08.local ceph-mon[82639]: pgmap v211: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:32:25.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:32:25 vm00.local ceph-mon[94470]: pgmap v212: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:32:25.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:32:25 vm00.local ceph-mon[96293]: pgmap v212: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:32:25.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:32:25 vm08.local ceph-mon[82639]: pgmap v212: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:32:25.948 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:32:25 vm00.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:32:25] "GET /metrics HTTP/1.1" 200 37990 "" "Prometheus/2.51.0" 2026-03-10T13:32:27.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:32:27 vm00.local ceph-mon[94470]: pgmap v213: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:32:27.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:32:27 vm00.local ceph-mon[94470]: from='client.44571 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 
2026-03-10T13:32:27.503 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:32:27 vm00.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:32:27.019Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:32:27.503 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:32:27 vm00.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:32:27.020Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:32:27.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:32:27 vm00.local ceph-mon[96293]: pgmap v213: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:32:27.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:32:27 vm00.local ceph-mon[96293]: from='client.44571 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:32:27.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:32:27 vm08.local ceph-mon[82639]: pgmap v213: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:32:27.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:32:27 vm08.local ceph-mon[82639]: from='client.44571 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:32:28.054 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch ps' 2026-03-10T13:32:28.265 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:32:28 vm00.local ceph-mon[94470]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:32:28.266 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:32:28 vm00.local ceph-mon[96293]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:32:28.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:32:28 vm08.local ceph-mon[82639]: from='mgr.44106 192.168.123.100:0/1747376812' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T13:32:28.640 INFO:teuthology.orchestra.run.vm00.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T13:32:28.640 INFO:teuthology.orchestra.run.vm00.stdout:alertmanager.a vm00 *:9093,9094 running (18m) 2m ago 25m 26.4M - 0.25.0 c8568f914cd2 12fde3cf83cb 2026-03-10T13:32:28.640 INFO:teuthology.orchestra.run.vm00.stdout:grafana.a vm08 *:3000 running (6m) 2m ago 25m 76.5M - 10.4.0 c8b91775d855 960e32589e98 2026-03-10T13:32:28.640 INFO:teuthology.orchestra.run.vm00.stdout:iscsi.foo.vm00.dezodo vm00 running (6m) 2m ago 25m 49.7M - 3.5 e1d6a67b021e 630bf6d4e7f3 
2026-03-10T13:32:28.640 INFO:teuthology.orchestra.run.vm00.stdout:mgr.x vm08 *:8443,9283,8765 running (6m) 2m ago 27m 488M - 19.2.3-678-ge911bdeb 654f31e6858e 31b91eebc856 2026-03-10T13:32:28.640 INFO:teuthology.orchestra.run.vm00.stdout:mgr.y vm00 *:8443,9283,8765 running (13m) 2m ago 28m 563M - 19.2.3-678-ge911bdeb 654f31e6858e 5bc576d4d32b 2026-03-10T13:32:28.640 INFO:teuthology.orchestra.run.vm00.stdout:mon.a vm00 running (5m) 2m ago 28m 56.5M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 981df6371890 2026-03-10T13:32:28.640 INFO:teuthology.orchestra.run.vm00.stdout:mon.b vm08 running (6m) 2m ago 27m 52.7M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 8cceb678a9ee 2026-03-10T13:32:28.640 INFO:teuthology.orchestra.run.vm00.stdout:mon.c vm00 running (5m) 2m ago 27m 47.1M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 43deda66dee3 2026-03-10T13:32:28.640 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.a vm00 *:9100 running (18m) 2m ago 25m 10.5M - 1.7.0 72c9c2088986 bcf883401619 2026-03-10T13:32:28.640 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.b vm08 *:9100 running (18m) 2m ago 25m 10.2M - 1.7.0 72c9c2088986 4ac83f03f818 2026-03-10T13:32:28.641 INFO:teuthology.orchestra.run.vm00.stdout:osd.0 vm00 running (4m) 2m ago 27m 53.2M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 5fc74f4d2179 2026-03-10T13:32:28.641 INFO:teuthology.orchestra.run.vm00.stdout:osd.1 vm00 running (4m) 2m ago 26m 74.4M 4096M 19.2.3-678-ge911bdeb 654f31e6858e dc65e199e9eb 2026-03-10T13:32:28.641 INFO:teuthology.orchestra.run.vm00.stdout:osd.2 vm00 running (4m) 2m ago 26m 70.1M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 706171e0f5c2 2026-03-10T13:32:28.641 INFO:teuthology.orchestra.run.vm00.stdout:osd.3 vm00 running (3m) 2m ago 26m 52.2M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 8739c77cf14d 2026-03-10T13:32:28.641 INFO:teuthology.orchestra.run.vm00.stdout:osd.4 vm08 running (3m) 2m ago 26m 75.7M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 5b92674798b7 2026-03-10T13:32:28.641 INFO:teuthology.orchestra.run.vm00.stdout:osd.5 vm08 running (3m) 2m ago 26m 48.8M 4096M 19.2.3-678-ge911bdeb 654f31e6858e e197b6bd6561 2026-03-10T13:32:28.641 INFO:teuthology.orchestra.run.vm00.stdout:osd.6 vm08 running (2m) 2m ago 26m 71.9M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 529ca6d92bc2 2026-03-10T13:32:28.641 INFO:teuthology.orchestra.run.vm00.stdout:osd.7 vm08 running (2m) 2m ago 25m 47.1M 4096M 19.2.3-678-ge911bdeb 654f31e6858e f59d4e9eed6e 2026-03-10T13:32:28.641 INFO:teuthology.orchestra.run.vm00.stdout:prometheus.a vm08 *:9095 running (6m) 2m ago 25m 48.5M - 2.51.0 1d3b7f56885b 3f9b2d0821c9 2026-03-10T13:32:28.641 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm00.tvlvzo vm00 *:8000 running (2m) 2m ago 25m 97.8M - 19.2.3-678-ge911bdeb 654f31e6858e 580ac5f891f7 2026-03-10T13:32:28.641 INFO:teuthology.orchestra.run.vm00.stdout:rgw.foo.vm08.ljayps vm08 *:8000 running (2m) 2m ago 25m 98.6M - 19.2.3-678-ge911bdeb 654f31e6858e 9f8220442a4b 2026-03-10T13:32:28.892 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph versions' 2026-03-10T13:32:29.148 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:32:29 vm00.local ceph-mon[94470]: pgmap v214: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:32:29.148 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:32:29 
vm00.local ceph-mon[96293]: pgmap v214: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:32:29.513 INFO:teuthology.orchestra.run.vm00.stdout:{ 2026-03-10T13:32:29.513 INFO:teuthology.orchestra.run.vm00.stdout: "mon": { 2026-03-10T13:32:29.513 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 3 2026-03-10T13:32:29.513 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T13:32:29.513 INFO:teuthology.orchestra.run.vm00.stdout: "mgr": { 2026-03-10T13:32:29.513 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-10T13:32:29.513 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T13:32:29.513 INFO:teuthology.orchestra.run.vm00.stdout: "osd": { 2026-03-10T13:32:29.513 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 8 2026-03-10T13:32:29.513 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T13:32:29.513 INFO:teuthology.orchestra.run.vm00.stdout: "rgw": { 2026-03-10T13:32:29.513 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-10T13:32:29.513 INFO:teuthology.orchestra.run.vm00.stdout: }, 2026-03-10T13:32:29.513 INFO:teuthology.orchestra.run.vm00.stdout: "overall": { 2026-03-10T13:32:29.513 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 15 2026-03-10T13:32:29.513 INFO:teuthology.orchestra.run.vm00.stdout: } 2026-03-10T13:32:29.513 INFO:teuthology.orchestra.run.vm00.stdout:} 2026-03-10T13:32:29.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:32:29 vm08.local ceph-mon[82639]: pgmap v214: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:32:29.574 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch upgrade status' 2026-03-10T13:32:30.128 INFO:teuthology.orchestra.run.vm00.stdout:{ 2026-03-10T13:32:30.128 INFO:teuthology.orchestra.run.vm00.stdout: "target_image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", 2026-03-10T13:32:30.128 INFO:teuthology.orchestra.run.vm00.stdout: "in_progress": true, 2026-03-10T13:32:30.128 INFO:teuthology.orchestra.run.vm00.stdout: "which": "Upgrading all daemon types on all hosts", 2026-03-10T13:32:30.128 INFO:teuthology.orchestra.run.vm00.stdout: "services_complete": [ 2026-03-10T13:32:30.128 INFO:teuthology.orchestra.run.vm00.stdout: "mon", 2026-03-10T13:32:30.128 INFO:teuthology.orchestra.run.vm00.stdout: "mgr", 2026-03-10T13:32:30.128 INFO:teuthology.orchestra.run.vm00.stdout: "rgw", 2026-03-10T13:32:30.128 INFO:teuthology.orchestra.run.vm00.stdout: "osd" 2026-03-10T13:32:30.128 INFO:teuthology.orchestra.run.vm00.stdout: ], 2026-03-10T13:32:30.128 INFO:teuthology.orchestra.run.vm00.stdout: "progress": "15/21 daemons upgraded", 2026-03-10T13:32:30.128 INFO:teuthology.orchestra.run.vm00.stdout: "message": "Error: UPGRADE_REDEPLOY_DAEMON: Upgrading daemon iscsi.foo.vm00.dezodo on host vm00 failed.", 
2026-03-10T13:32:30.128 INFO:teuthology.orchestra.run.vm00.stdout: "is_paused": true 2026-03-10T13:32:30.128 INFO:teuthology.orchestra.run.vm00.stdout:} 2026-03-10T13:32:30.213 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph health detail' 2026-03-10T13:32:30.399 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:32:30 vm00.local ceph-mon[96293]: from='client.44631 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:32:30.400 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:32:30 vm00.local ceph-mon[96293]: from='client.? 192.168.123.100:0/3143002345' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:32:30.400 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:32:30 vm00.local ceph-mon[94470]: from='client.44631 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:32:30.400 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:32:30 vm00.local ceph-mon[94470]: from='client.? 192.168.123.100:0/3143002345' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:32:30.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:32:30 vm08.local ceph-mon[82639]: from='client.44631 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:32:30.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:32:30 vm08.local ceph-mon[82639]: from='client.? 192.168.123.100:0/3143002345' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:32:30.850 INFO:teuthology.orchestra.run.vm00.stdout:HEALTH_WARN Upgrading daemon iscsi.foo.vm00.dezodo on host vm00 failed. 2026-03-10T13:32:30.851 INFO:teuthology.orchestra.run.vm00.stdout:[WRN] UPGRADE_REDEPLOY_DAEMON: Upgrading daemon iscsi.foo.vm00.dezodo on host vm00 failed. 2026-03-10T13:32:30.851 INFO:teuthology.orchestra.run.vm00.stdout: Upgrade daemon: iscsi.foo.vm00.dezodo: cephadm exited with an error code: 1, stderr: Redeploy daemon iscsi.foo.vm00.dezodo ... 2026-03-10T13:32:30.851 INFO:teuthology.orchestra.run.vm00.stdout:Creating ceph-iscsi config... 2026-03-10T13:32:30.851 INFO:teuthology.orchestra.run.vm00.stdout:Write file: /var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/iscsi.foo.vm00.dezodo/iscsi-gateway.cfg 2026-03-10T13:32:30.851 INFO:teuthology.orchestra.run.vm00.stdout:Write file: /var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/iscsi.foo.vm00.dezodo/tcmu-runner-entrypoint.sh 2026-03-10T13:32:30.851 INFO:teuthology.orchestra.run.vm00.stdout:Failed to trim old cgroups /sys/fs/cgroup/system.slice/system-ceph\x2d98a3dada\x2d1c81\x2d11f1\x2d89c9\x2dd57c120f78d5.slice/ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service 2026-03-10T13:32:30.851 INFO:teuthology.orchestra.run.vm00.stdout:Non-zero exit code 1 from systemctl start ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo 2026-03-10T13:32:30.851 INFO:teuthology.orchestra.run.vm00.stdout:systemctl: stderr Job for ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service failed because the control process exited with error code. 
2026-03-10T13:32:30.851 INFO:teuthology.orchestra.run.vm00.stdout:systemctl: stderr See "systemctl status ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service" and "journalctl -xeu ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service" for details. 2026-03-10T13:32:30.851 INFO:teuthology.orchestra.run.vm00.stdout:Traceback (most recent call last): 2026-03-10T13:32:30.851 INFO:teuthology.orchestra.run.vm00.stdout: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main 2026-03-10T13:32:30.851 INFO:teuthology.orchestra.run.vm00.stdout: return _run_code(code, main_globals, None, 2026-03-10T13:32:30.851 INFO:teuthology.orchestra.run.vm00.stdout: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code 2026-03-10T13:32:30.851 INFO:teuthology.orchestra.run.vm00.stdout: exec(code, run_globals) 2026-03-10T13:32:30.851 INFO:teuthology.orchestra.run.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in 2026-03-10T13:32:30.851 INFO:teuthology.orchestra.run.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main 2026-03-10T13:32:30.851 INFO:teuthology.orchestra.run.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3062, in command_deploy_from 2026-03-10T13:32:30.851 INFO:teuthology.orchestra.run.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3097, in _common_deploy 2026-03-10T13:32:30.851 INFO:teuthology.orchestra.run.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3117, in _deploy_daemon_container 2026-03-10T13:32:30.851 INFO:teuthology.orchestra.run.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 1090, in deploy_daemon 2026-03-10T13:32:30.851 INFO:teuthology.orchestra.run.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 1237, in deploy_daemon_units 2026-03-10T13:32:30.851 INFO:teuthology.orchestra.run.vm00.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 307, in call_throws 2026-03-10T13:32:30.851 INFO:teuthology.orchestra.run.vm00.stdout:RuntimeError: Failed command: systemctl start ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo: Job for ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service failed because the control process exited with error code. 2026-03-10T13:32:30.851 INFO:teuthology.orchestra.run.vm00.stdout:See "systemctl status ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service" and "journalctl -xeu ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@iscsi.foo.vm00.dezodo.service" for details. 2026-03-10T13:32:30.950 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph versions | jq -e '"'"'.overall | length == 1'"'"'' 2026-03-10T13:32:31.140 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:32:31 vm00.local ceph-mon[94470]: pgmap v215: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:32:31.140 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:32:31 vm00.local ceph-mon[94470]: from='client.? 
192.168.123.100:0/1038683529' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T13:32:31.140 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:32:31 vm00.local ceph-mon[96293]: pgmap v215: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:32:31.140 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:32:31 vm00.local ceph-mon[96293]: from='client.? 192.168.123.100:0/1038683529' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T13:32:31.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:32:31 vm08.local ceph-mon[82639]: pgmap v215: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:32:31.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:32:31 vm08.local ceph-mon[82639]: from='client.? 192.168.123.100:0/1038683529' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T13:32:31.579 INFO:teuthology.orchestra.run.vm00.stdout:true 2026-03-10T13:32:31.642 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph versions | jq -e '"'"'.overall | keys'"'"' | grep $sha1' 2026-03-10T13:32:32.246 INFO:teuthology.orchestra.run.vm00.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)" 2026-03-10T13:32:32.304 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch ls | grep '"'"'^osd '"'"'' 2026-03-10T13:32:32.471 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:32:32 vm00.local ceph-mon[94470]: from='client.54565 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:32:32.471 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:32:32 vm00.local ceph-mon[94470]: from='client.? 192.168.123.100:0/1496912454' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:32:32.471 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:32:32 vm00.local ceph-mon[96293]: from='client.54565 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:32:32.471 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:32:32 vm00.local ceph-mon[96293]: from='client.? 192.168.123.100:0/1496912454' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:32:32.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:32:32 vm08.local ceph-mon[82639]: from='client.54565 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:32:32.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:32:32 vm08.local ceph-mon[82639]: from='client.? 192.168.123.100:0/1496912454' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:32:32.805 INFO:teuthology.orchestra.run.vm00.stdout:osd 8 2m ago - 2026-03-10T13:32:32.866 INFO:teuthology.run_tasks:Running task cephadm.shell... 
2026-03-10T13:32:32.868 INFO:tasks.cephadm:Running commands on role mon.a host ubuntu@vm00.local 2026-03-10T13:32:32.868 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -- bash -c 'ceph orch upgrade ls' 2026-03-10T13:32:33.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:32:33 vm00.local ceph-mon[94470]: pgmap v216: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:32:33.503 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:32:33 vm00.local ceph-mon[94470]: from='client.? 192.168.123.100:0/1195789290' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:32:33.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:32:33 vm00.local ceph-mon[96293]: pgmap v216: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:32:33.503 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:32:33 vm00.local ceph-mon[96293]: from='client.? 192.168.123.100:0/1195789290' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:32:33.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:32:33 vm08.local ceph-mon[82639]: pgmap v216: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:32:33.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:32:33 vm08.local ceph-mon[82639]: from='client.? 192.168.123.100:0/1195789290' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T13:32:34.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:32:34 vm00.local ceph-mon[94470]: from='client.54586 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:32:34.502 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:32:34 vm00.local ceph-mon[96293]: from='client.54586 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:32:34.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:32:34 vm08.local ceph-mon[82639]: from='client.54586 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:32:34.931 INFO:teuthology.orchestra.run.vm00.stdout:{ 2026-03-10T13:32:34.931 INFO:teuthology.orchestra.run.vm00.stdout: "image": "quay.io/ceph/ceph", 2026-03-10T13:32:34.931 INFO:teuthology.orchestra.run.vm00.stdout: "registry": "quay.io", 2026-03-10T13:32:34.931 INFO:teuthology.orchestra.run.vm00.stdout: "bare_image": "ceph/ceph", 2026-03-10T13:32:34.931 INFO:teuthology.orchestra.run.vm00.stdout: "versions": [ 2026-03-10T13:32:34.931 INFO:teuthology.orchestra.run.vm00.stdout: "20.2.0", 2026-03-10T13:32:34.931 INFO:teuthology.orchestra.run.vm00.stdout: "20.1.1", 2026-03-10T13:32:34.931 INFO:teuthology.orchestra.run.vm00.stdout: "20.1.0", 2026-03-10T13:32:34.931 INFO:teuthology.orchestra.run.vm00.stdout: "19.2.3", 2026-03-10T13:32:34.931 INFO:teuthology.orchestra.run.vm00.stdout: "19.2.2", 2026-03-10T13:32:34.931 INFO:teuthology.orchestra.run.vm00.stdout: "19.2.1", 2026-03-10T13:32:34.931 INFO:teuthology.orchestra.run.vm00.stdout: "19.2.0" 2026-03-10T13:32:34.931 INFO:teuthology.orchestra.run.vm00.stdout: ] 2026-03-10T13:32:34.931 INFO:teuthology.orchestra.run.vm00.stdout:} 2026-03-10T13:32:34.995 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image 
quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -- bash -c 'ceph orch upgrade ls --image quay.io/ceph/ceph --show-all-versions | grep 16.2.0' 2026-03-10T13:32:35.187 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:32:35 vm00.local ceph-mon[96293]: from='client.34589 -' entity='client.admin' cmd=[{"prefix": "orch upgrade ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:32:35.187 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:32:35 vm00.local ceph-mon[96293]: pgmap v217: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:32:35.187 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:32:35 vm00.local ceph-mon[94470]: from='client.34589 -' entity='client.admin' cmd=[{"prefix": "orch upgrade ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:32:35.187 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:32:35 vm00.local ceph-mon[94470]: pgmap v217: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:32:35.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:32:35 vm08.local ceph-mon[82639]: from='client.34589 -' entity='client.admin' cmd=[{"prefix": "orch upgrade ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:32:35.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:32:35 vm08.local ceph-mon[82639]: pgmap v217: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:32:35.960 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:32:35 vm00.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: ::ffff:192.168.123.108 - - [10/Mar/2026:13:32:35] "GET /metrics HTTP/1.1" 200 37989 "" "Prometheus/2.51.0" 2026-03-10T13:32:36.892 INFO:teuthology.orchestra.run.vm00.stdout: "16.2.0", 2026-03-10T13:32:36.936 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -- bash -c 'ceph orch upgrade ls --image quay.io/ceph/ceph --tags | grep v16.2.2' 2026-03-10T13:32:37.154 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:32:37 vm00.local ceph-mon[94470]: from='client.54589 -' entity='client.admin' cmd=[{"prefix": "orch upgrade ls", "image": "quay.io/ceph/ceph", "show_all_versions": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:32:37.155 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:32:37 vm00.local ceph-mon[94470]: pgmap v218: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:32:37.155 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:32:37 vm00.local ceph-mon[94470]: from='client.44571 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:32:37.155 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:32:37 vm00.local ceph-mon[96293]: from='client.54589 -' entity='client.admin' cmd=[{"prefix": "orch upgrade ls", "image": "quay.io/ceph/ceph", "show_all_versions": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:32:37.155 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:32:37 vm00.local ceph-mon[96293]: pgmap v218: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:32:37.155 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:32:37 vm00.local ceph-mon[96293]: 
from='client.44571 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:32:37.155 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:32:37 vm00.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:32:37.020Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:32:37.155 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:32:37 vm00.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:32:37.020Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:32:37.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:32:37 vm08.local ceph-mon[82639]: from='client.54589 -' entity='client.admin' cmd=[{"prefix": "orch upgrade ls", "image": "quay.io/ceph/ceph", "show_all_versions": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:32:37.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:32:37 vm08.local ceph-mon[82639]: pgmap v218: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T13:32:37.520 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:32:37 vm08.local ceph-mon[82639]: from='client.44571 -' entity='client.iscsi.foo.vm00.dezodo' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T13:32:38.917 INFO:teuthology.orchestra.run.vm00.stdout: "v16.2.2", 2026-03-10T13:32:38.918 INFO:teuthology.orchestra.run.vm00.stdout: "v16.2.2-20210505", 2026-03-10T13:32:38.974 DEBUG:teuthology.run_tasks:Unwinding manager cephadm 2026-03-10T13:32:38.976 INFO:tasks.cephadm:Teardown begin 2026-03-10T13:32:38.976 DEBUG:teuthology.orchestra.run.vm00:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring 2026-03-10T13:32:39.001 DEBUG:teuthology.orchestra.run.vm08:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring 2026-03-10T13:32:39.027 INFO:tasks.cephadm:Disabling cephadm mgr module 2026-03-10T13:32:39.027 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 -- ceph mgr module disable cephadm 2026-03-10T13:32:39.218 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:32:39 vm00.local ceph-mon[94470]: pgmap v219: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:32:39.218 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:32:39 vm00.local ceph-mon[94470]: from='client.54595 -' entity='client.admin' cmd=[{"prefix": "orch upgrade ls", "image": "quay.io/ceph/ceph", "tags": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:32:39.218 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:32:39 vm00.local ceph-mon[96293]: pgmap v219: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:32:39.218 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 
13:32:39 vm00.local ceph-mon[96293]: from='client.54595 -' entity='client.admin' cmd=[{"prefix": "orch upgrade ls", "image": "quay.io/ceph/ceph", "tags": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:32:39.317 INFO:teuthology.orchestra.run.vm00.stderr:Error: statfs /etc/ceph/ceph.conf: no such file or directory 2026-03-10T13:32:39.338 DEBUG:teuthology.orchestra.run:got remote process result: 125 2026-03-10T13:32:39.339 INFO:tasks.cephadm:Cleaning up testdir ceph.* files... 2026-03-10T13:32:39.339 DEBUG:teuthology.orchestra.run.vm00:> rm -f /home/ubuntu/cephtest/seed.ceph.conf /home/ubuntu/cephtest/ceph.pub 2026-03-10T13:32:39.356 DEBUG:teuthology.orchestra.run.vm08:> rm -f /home/ubuntu/cephtest/seed.ceph.conf /home/ubuntu/cephtest/ceph.pub 2026-03-10T13:32:39.372 INFO:tasks.cephadm:Stopping all daemons... 2026-03-10T13:32:39.372 INFO:tasks.cephadm.mon.a:Stopping mon.a... 2026-03-10T13:32:39.372 DEBUG:teuthology.orchestra.run.vm00:> sudo systemctl stop ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@mon.a 2026-03-10T13:32:39.502 INFO:journalctl@ceph.mon.a.vm00.stdout:Mar 10 13:32:39 vm00.local systemd[1]: Stopping Ceph mon.a for 98a3dada-1c81-11f1-89c9-d57c120f78d5... 2026-03-10T13:32:39.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:32:39 vm08.local ceph-mon[82639]: pgmap v219: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T13:32:39.522 INFO:journalctl@ceph.mon.b.vm08.stdout:Mar 10 13:32:39 vm08.local ceph-mon[82639]: from='client.54595 -' entity='client.admin' cmd=[{"prefix": "orch upgrade ls", "image": "quay.io/ceph/ceph", "tags": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T13:32:39.654 DEBUG:teuthology.orchestra.run.vm00:> sudo pkill -f 'journalctl -f -n 0 -u ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@mon.a.service' 2026-03-10T13:32:39.694 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-10T13:32:39.694 INFO:tasks.cephadm.mon.a:Stopped mon.a 2026-03-10T13:32:39.694 INFO:tasks.cephadm.mon.b:Stopping mon.c... 2026-03-10T13:32:39.694 DEBUG:teuthology.orchestra.run.vm00:> sudo systemctl stop ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@mon.c 2026-03-10T13:32:40.022 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:32:39 vm00.local systemd[1]: Stopping Ceph mon.c for 98a3dada-1c81-11f1-89c9-d57c120f78d5... 
2026-03-10T13:32:40.022 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:32:39 vm00.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mon-c[96289]: 2026-03-10T13:32:39.848+0000 7fb864c59640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-mon -n mon.c -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false (PID: 1) UID: 0 2026-03-10T13:32:40.022 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:32:39 vm00.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mon-c[96289]: 2026-03-10T13:32:39.848+0000 7fb864c59640 -1 mon.c@1(peon) e4 *** Got Signal Terminated *** 2026-03-10T13:32:40.023 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:32:39 vm00.local podman[129042]: 2026-03-10 13:32:39.941742896 +0000 UTC m=+0.110358494 container died 43deda66dee35e12d8896091c92b05e1e68ed5aea0df0457ccc3ba3237b80fdb (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mon-c, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, org.label-schema.build-date=20260223, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/) 2026-03-10T13:32:40.023 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:32:39 vm00.local podman[129042]: 2026-03-10 13:32:39.968605801 +0000 UTC m=+0.137221399 container remove 43deda66dee35e12d8896091c92b05e1e68ed5aea0df0457ccc3ba3237b80fdb (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mon-c, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2, CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, OSD_FLAVOR=default) 2026-03-10T13:32:40.023 INFO:journalctl@ceph.mon.c.vm00.stdout:Mar 10 13:32:39 vm00.local bash[129042]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mon-c 2026-03-10T13:32:40.033 DEBUG:teuthology.orchestra.run.vm00:> sudo pkill -f 'journalctl -f -n 0 -u ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@mon.c.service' 2026-03-10T13:32:40.072 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-10T13:32:40.072 INFO:tasks.cephadm.mon.b:Stopped mon.c 2026-03-10T13:32:40.072 INFO:tasks.cephadm.mon.b:Stopping mon.b... 
2026-03-10T13:32:40.072 DEBUG:teuthology.orchestra.run.vm08:> sudo systemctl stop ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@mon.b 2026-03-10T13:32:40.316 DEBUG:teuthology.orchestra.run.vm08:> sudo pkill -f 'journalctl -f -n 0 -u ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@mon.b.service' 2026-03-10T13:32:40.353 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-10T13:32:40.353 INFO:tasks.cephadm.mon.b:Stopped mon.b 2026-03-10T13:32:40.353 INFO:tasks.cephadm.mgr.y:Stopping mgr.y... 2026-03-10T13:32:40.353 DEBUG:teuthology.orchestra.run.vm00:> sudo systemctl stop ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@mgr.y 2026-03-10T13:32:40.620 DEBUG:teuthology.orchestra.run.vm00:> sudo pkill -f 'journalctl -f -n 0 -u ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@mgr.y.service' 2026-03-10T13:32:40.645 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:32:40 vm00.local systemd[1]: Stopping Ceph mgr.y for 98a3dada-1c81-11f1-89c9-d57c120f78d5... 2026-03-10T13:32:40.646 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:32:40 vm00.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y[81504]: 2026-03-10T13:32:40.460+0000 7f1476192640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-mgr -n mgr.y -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-10T13:32:40.646 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:32:40 vm00.local podman[129145]: 2026-03-10 13:32:40.51332739 +0000 UTC m=+0.066006882 container died 5bc576d4d32be55f1074bb50ab6e5fff01b91e80bb3eb41cd22ab252461820b8 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df) 2026-03-10T13:32:40.646 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:32:40 vm00.local podman[129145]: 2026-03-10 13:32:40.543718129 +0000 UTC m=+0.096397621 container remove 5bc576d4d32be55f1074bb50ab6e5fff01b91e80bb3eb41cd22ab252461820b8 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, org.label-schema.vendor=CentOS, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , ceph=True, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.schema-version=1.0) 2026-03-10T13:32:40.646 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:32:40 vm00.local bash[129145]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-y 2026-03-10T13:32:40.646 
INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:32:40 vm00.local systemd[1]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@mgr.y.service: Deactivated successfully. 2026-03-10T13:32:40.646 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:32:40 vm00.local systemd[1]: Stopped Ceph mgr.y for 98a3dada-1c81-11f1-89c9-d57c120f78d5. 2026-03-10T13:32:40.646 INFO:journalctl@ceph.mgr.y.vm00.stdout:Mar 10 13:32:40 vm00.local systemd[1]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@mgr.y.service: Consumed 23.180s CPU time. 2026-03-10T13:32:40.659 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-10T13:32:40.659 INFO:tasks.cephadm.mgr.y:Stopped mgr.y 2026-03-10T13:32:40.660 INFO:tasks.cephadm.mgr.x:Stopping mgr.x... 2026-03-10T13:32:40.660 DEBUG:teuthology.orchestra.run.vm08:> sudo systemctl stop ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@mgr.x 2026-03-10T13:32:40.914 DEBUG:teuthology.orchestra.run.vm08:> sudo pkill -f 'journalctl -f -n 0 -u ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@mgr.x.service' 2026-03-10T13:32:40.941 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:32:40 vm08.local systemd[1]: Stopping Ceph mgr.x for 98a3dada-1c81-11f1-89c9-d57c120f78d5... 2026-03-10T13:32:40.941 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:32:40 vm08.local podman[110290]: 2026-03-10 13:32:40.821986317 +0000 UTC m=+0.061896229 container died 31b91eebc8566a685ff6ef1ced5d07555b7ef2994a2998dc1182247f362c5f17 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.vendor=CentOS, ceph=True, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, CEPH_REF=squid) 2026-03-10T13:32:40.941 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:32:40 vm08.local podman[110290]: 2026-03-10 13:32:40.846448237 +0000 UTC m=+0.086358149 container remove 31b91eebc8566a685ff6ef1ced5d07555b7ef2994a2998dc1182247f362c5f17 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, org.label-schema.vendor=CentOS, org.label-schema.build-date=20260223, OSD_FLAVOR=default, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.41.3) 2026-03-10T13:32:40.941 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:32:40 vm08.local bash[110290]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-mgr-x 2026-03-10T13:32:40.941 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:32:40 vm08.local systemd[1]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@mgr.x.service: Main process exited, code=exited, status=143/n/a 
2026-03-10T13:32:40.941 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:32:40 vm08.local systemd[1]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@mgr.x.service: Failed with result 'exit-code'. 2026-03-10T13:32:40.941 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:32:40 vm08.local systemd[1]: Stopped Ceph mgr.x for 98a3dada-1c81-11f1-89c9-d57c120f78d5. 2026-03-10T13:32:40.941 INFO:journalctl@ceph.mgr.x.vm08.stdout:Mar 10 13:32:40 vm08.local systemd[1]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@mgr.x.service: Consumed 7.119s CPU time. 2026-03-10T13:32:40.951 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-10T13:32:40.952 INFO:tasks.cephadm.mgr.x:Stopped mgr.x 2026-03-10T13:32:40.952 INFO:tasks.cephadm.osd.0:Stopping osd.0... 2026-03-10T13:32:40.952 DEBUG:teuthology.orchestra.run.vm00:> sudo systemctl stop ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@osd.0 2026-03-10T13:32:41.253 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 13:32:40 vm00.local systemd[1]: Stopping Ceph osd.0 for 98a3dada-1c81-11f1-89c9-d57c120f78d5... 2026-03-10T13:32:41.253 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 13:32:41 vm00.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-0[102078]: 2026-03-10T13:32:41.072+0000 7f12dacbb640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-10T13:32:41.253 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 13:32:41 vm00.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-0[102078]: 2026-03-10T13:32:41.072+0000 7f12dacbb640 -1 osd.0 140 *** Got signal Terminated *** 2026-03-10T13:32:41.253 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 13:32:41 vm00.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-0[102078]: 2026-03-10T13:32:41.072+0000 7f12dacbb640 -1 osd.0 140 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-10T13:32:45.520 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:32:45 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[79338]: ts=2026-03-10T13:32:45.113Z caller=refresh.go:90 level=error component="discovery manager scrape" discovery=http config=nvmeof msg="Unable to refresh target groups" err="Get \"http://192.168.123.100:8765/sd/prometheus/sd-config?service=nvmeof\": dial tcp 192.168.123.100:8765: connect: connection refused" 2026-03-10T13:32:45.521 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:32:45 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[79338]: ts=2026-03-10T13:32:45.114Z caller=refresh.go:90 level=error component="discovery manager scrape" discovery=http config=node msg="Unable to refresh target groups" err="Get \"http://192.168.123.100:8765/sd/prometheus/sd-config?service=node-exporter\": dial tcp 192.168.123.100:8765: connect: connection refused" 2026-03-10T13:32:45.521 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:32:45 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[79338]: ts=2026-03-10T13:32:45.114Z caller=refresh.go:90 level=error component="discovery manager scrape" discovery=http config=nfs msg="Unable to refresh target groups" err="Get \"http://192.168.123.100:8765/sd/prometheus/sd-config?service=nfs\": dial tcp 192.168.123.100:8765: connect: connection refused" 2026-03-10T13:32:45.521 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:32:45 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[79338]: ts=2026-03-10T13:32:45.114Z caller=refresh.go:90 
level=error component="discovery manager scrape" discovery=http config=ceph msg="Unable to refresh target groups" err="Get \"http://192.168.123.100:8765/sd/prometheus/sd-config?service=mgr-prometheus\": dial tcp 192.168.123.100:8765: connect: connection refused" 2026-03-10T13:32:45.521 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:32:45 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[79338]: ts=2026-03-10T13:32:45.114Z caller=refresh.go:90 level=error component="discovery manager notify" discovery=http config=config-0 msg="Unable to refresh target groups" err="Get \"http://192.168.123.100:8765/sd/prometheus/sd-config?service=alertmanager\": dial tcp 192.168.123.100:8765: connect: connection refused" 2026-03-10T13:32:45.521 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:32:45 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-prometheus-a[79338]: ts=2026-03-10T13:32:45.114Z caller=refresh.go:90 level=error component="discovery manager scrape" discovery=http config=ceph-exporter msg="Unable to refresh target groups" err="Get \"http://192.168.123.100:8765/sd/prometheus/sd-config?service=ceph-exporter\": dial tcp 192.168.123.100:8765: connect: connection refused" 2026-03-10T13:32:46.385 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 13:32:46 vm00.local podman[129248]: 2026-03-10 13:32:46.110152966 +0000 UTC m=+5.051760456 container died 5fc74f4d21799f9910b5d2765dd5ccb080a8c8bf5820c225ffc639a3f3e0f0ad (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, CEPH_REF=squid) 2026-03-10T13:32:46.385 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 13:32:46 vm00.local podman[129248]: 2026-03-10 13:32:46.136636472 +0000 UTC m=+5.078243962 container remove 5fc74f4d21799f9910b5d2765dd5ccb080a8c8bf5820c225ffc639a3f3e0f0ad (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, OSD_FLAVOR=default, ceph=True, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-10T13:32:46.385 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 13:32:46 vm00.local bash[129248]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-0 2026-03-10T13:32:46.385 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 13:32:46 vm00.local podman[129314]: 2026-03-10 13:32:46.291695922 +0000 UTC m=+0.015458107 container create 
45fe46bb88ce876927d2362afea7707f69521e683cc3c3af10620e8a3e165e06 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-0-deactivate, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, io.buildah.version=1.41.3) 2026-03-10T13:32:46.385 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 13:32:46 vm00.local podman[129314]: 2026-03-10 13:32:46.330668646 +0000 UTC m=+0.054430831 container init 45fe46bb88ce876927d2362afea7707f69521e683cc3c3af10620e8a3e165e06 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-0-deactivate, CEPH_REF=squid, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.41.3, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.build-date=20260223, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9) 2026-03-10T13:32:46.385 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 13:32:46 vm00.local podman[129314]: 2026-03-10 13:32:46.333218048 +0000 UTC m=+0.056980233 container start 45fe46bb88ce876927d2362afea7707f69521e683cc3c3af10620e8a3e165e06 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-0-deactivate, org.label-schema.build-date=20260223, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , ceph=True, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-10T13:32:46.385 INFO:journalctl@ceph.osd.0.vm00.stdout:Mar 10 13:32:46 vm00.local podman[129314]: 2026-03-10 13:32:46.336060058 +0000 UTC m=+0.059822243 container attach 45fe46bb88ce876927d2362afea7707f69521e683cc3c3af10620e8a3e165e06 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-0-deactivate, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.build-date=20260223, ceph=True, CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.authors=Ceph 
Release Team , OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.41.3) 2026-03-10T13:32:46.509 DEBUG:teuthology.orchestra.run.vm00:> sudo pkill -f 'journalctl -f -n 0 -u ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@osd.0.service' 2026-03-10T13:32:46.547 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-10T13:32:46.548 INFO:tasks.cephadm.osd.0:Stopped osd.0 2026-03-10T13:32:46.548 INFO:tasks.cephadm.osd.1:Stopping osd.1... 2026-03-10T13:32:46.548 DEBUG:teuthology.orchestra.run.vm00:> sudo systemctl stop ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@osd.1 2026-03-10T13:32:46.722 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 13:32:46 vm00.local systemd[1]: Stopping Ceph osd.1 for 98a3dada-1c81-11f1-89c9-d57c120f78d5... 2026-03-10T13:32:47.002 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 13:32:46 vm00.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-1[106185]: 2026-03-10T13:32:46.720+0000 7fe942c8b640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-10T13:32:47.002 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 13:32:46 vm00.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-1[106185]: 2026-03-10T13:32:46.720+0000 7fe942c8b640 -1 osd.1 140 *** Got signal Terminated *** 2026-03-10T13:32:47.002 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 13:32:46 vm00.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-1[106185]: 2026-03-10T13:32:46.720+0000 7fe942c8b640 -1 osd.1 140 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-10T13:32:47.502 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:32:47 vm00.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:32:47.020Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:32:47.502 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:32:47 vm00.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:32:47.021Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:32:52.029 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 13:32:51 vm00.local podman[129412]: 2026-03-10 13:32:51.752737312 +0000 UTC m=+5.053437568 container died dc65e199e9ebe6db35777e2d4a16bd3caa1e65053b05529aa4472e815e9dd88a (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-1, ceph=True, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, 
org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9) 2026-03-10T13:32:52.029 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 13:32:51 vm00.local podman[129412]: 2026-03-10 13:32:51.775414662 +0000 UTC m=+5.076114918 container remove dc65e199e9ebe6db35777e2d4a16bd3caa1e65053b05529aa4472e815e9dd88a (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-1, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, CEPH_REF=squid, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-10T13:32:52.029 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 13:32:51 vm00.local bash[129412]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-1 2026-03-10T13:32:52.029 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 13:32:51 vm00.local podman[129495]: 2026-03-10 13:32:51.937122336 +0000 UTC m=+0.017350930 container create fdd5f543fe2539086708e5ac60b7380598374fcce4c242cda793d54edafb7399 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-1-deactivate, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0, CEPH_REF=squid, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-10T13:32:52.029 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 13:32:51 vm00.local podman[129495]: 2026-03-10 13:32:51.981494778 +0000 UTC m=+0.061723383 container init fdd5f543fe2539086708e5ac60b7380598374fcce4c242cda793d54edafb7399 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-1-deactivate, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, io.buildah.version=1.41.3, ceph=True, 
CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df) 2026-03-10T13:32:52.029 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 13:32:51 vm00.local podman[129495]: 2026-03-10 13:32:51.98504742 +0000 UTC m=+0.065276014 container start fdd5f543fe2539086708e5ac60b7380598374fcce4c242cda793d54edafb7399 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-1-deactivate, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.build-date=20260223, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9) 2026-03-10T13:32:52.029 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 13:32:51 vm00.local podman[129495]: 2026-03-10 13:32:51.988095073 +0000 UTC m=+0.068323667 container attach fdd5f543fe2539086708e5ac60b7380598374fcce4c242cda793d54edafb7399 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-1-deactivate, org.opencontainers.image.authors=Ceph Release Team , io.buildah.version=1.41.3, ceph=True, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/) 2026-03-10T13:32:52.029 INFO:journalctl@ceph.osd.1.vm00.stdout:Mar 10 13:32:52 vm00.local podman[129495]: 2026-03-10 13:32:51.929267944 +0000 UTC m=+0.009496548 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:32:52.167 DEBUG:teuthology.orchestra.run.vm00:> sudo pkill -f 'journalctl -f -n 0 -u ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@osd.1.service' 2026-03-10T13:32:52.206 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-10T13:32:52.206 INFO:tasks.cephadm.osd.1:Stopped osd.1 2026-03-10T13:32:52.206 INFO:tasks.cephadm.osd.2:Stopping osd.2... 2026-03-10T13:32:52.206 DEBUG:teuthology.orchestra.run.vm00:> sudo systemctl stop ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@osd.2 2026-03-10T13:32:52.288 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 13:32:52 vm00.local systemd[1]: Stopping Ceph osd.2 for 98a3dada-1c81-11f1-89c9-d57c120f78d5... 
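[annotation] The records above repeat the same per-daemon teardown for each OSD on vm00: teuthology issues systemctl stop on the cephadm unit, podman logs the OSD container dying and being removed (followed by a short-lived "-deactivate" container), and the journalctl follower that teuthology attached to the unit is killed once the daemon reports stopped. A minimal bash sketch of that loop, built only from the fsid and commands visible in this log (illustrative, not the actual teuthology code path):

    #!/usr/bin/env bash
    # Sketch of the per-OSD stop sequence recorded above.
    # fsid and OSD ids are taken from this job; adjust for another cluster.
    fsid=98a3dada-1c81-11f1-89c9-d57c120f78d5
    for osd in 0 1 2 3; do
        # blocks until podman reports the OSD container died/removed
        sudo systemctl stop "ceph-${fsid}@osd.${osd}"
        # drop the journalctl follower that was tailing this unit
        sudo pkill -f "journalctl -f -n 0 -u ceph-${fsid}@osd.${osd}.service" || true
    done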
2026-03-10T13:32:52.752 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 13:32:52 vm00.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-2[111318]: 2026-03-10T13:32:52.350+0000 7fd7ef530640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-10T13:32:52.752 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 13:32:52 vm00.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-2[111318]: 2026-03-10T13:32:52.350+0000 7fd7ef530640 -1 osd.2 140 *** Got signal Terminated *** 2026-03-10T13:32:52.752 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 13:32:52 vm00.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-2[111318]: 2026-03-10T13:32:52.350+0000 7fd7ef530640 -1 osd.2 140 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-10T13:32:57.388 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:32:57 vm00.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:32:57.020Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:32:57.388 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:32:57 vm00.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:32:57.021Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:32:57.671 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 13:32:57 vm00.local podman[129591]: 2026-03-10 13:32:57.38746509 +0000 UTC m=+5.051476968 container died 706171e0f5c28f2673efdb1742ac342e5eab5b04565f0a11fffe853d8ab54e70 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-2, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-10T13:32:57.671 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 13:32:57 vm00.local podman[129591]: 2026-03-10 13:32:57.406019362 +0000 UTC m=+5.070031229 container remove 706171e0f5c28f2673efdb1742ac342e5eab5b04565f0a11fffe853d8ab54e70 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-2, OSD_FLAVOR=default, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, CEPH_REF=squid, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.name=CentOS 
Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , ceph=True, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-10T13:32:57.671 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 13:32:57 vm00.local bash[129591]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-2 2026-03-10T13:32:57.671 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 13:32:57 vm00.local podman[129658]: 2026-03-10 13:32:57.580056558 +0000 UTC m=+0.025681767 container create fc52356c7f4832634aa97af6e3b689482e1381329bb81375c6cb2594fe49280a (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-2-deactivate, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9) 2026-03-10T13:32:57.671 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 13:32:57 vm00.local podman[129658]: 2026-03-10 13:32:57.621728914 +0000 UTC m=+0.067354134 container init fc52356c7f4832634aa97af6e3b689482e1381329bb81375c6cb2594fe49280a (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-2-deactivate, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, ceph=True, CEPH_REF=squid, org.opencontainers.image.documentation=https://docs.ceph.com/) 2026-03-10T13:32:57.671 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 13:32:57 vm00.local podman[129658]: 2026-03-10 13:32:57.624727007 +0000 UTC m=+0.070352216 container start fc52356c7f4832634aa97af6e3b689482e1381329bb81375c6cb2594fe49280a (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-2-deactivate, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, 
CEPH_REF=squid) 2026-03-10T13:32:57.671 INFO:journalctl@ceph.osd.2.vm00.stdout:Mar 10 13:32:57 vm00.local podman[129658]: 2026-03-10 13:32:57.627919434 +0000 UTC m=+0.073544703 container attach fc52356c7f4832634aa97af6e3b689482e1381329bb81375c6cb2594fe49280a (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-2-deactivate, io.buildah.version=1.41.3, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223) 2026-03-10T13:32:57.819 DEBUG:teuthology.orchestra.run.vm00:> sudo pkill -f 'journalctl -f -n 0 -u ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@osd.2.service' 2026-03-10T13:32:57.898 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-10T13:32:57.899 INFO:tasks.cephadm.osd.2:Stopped osd.2 2026-03-10T13:32:57.899 INFO:tasks.cephadm.osd.3:Stopping osd.3... 2026-03-10T13:32:57.899 DEBUG:teuthology.orchestra.run.vm00:> sudo systemctl stop ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@osd.3 2026-03-10T13:32:58.252 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 13:32:57 vm00.local systemd[1]: Stopping Ceph osd.3 for 98a3dada-1c81-11f1-89c9-d57c120f78d5... 2026-03-10T13:32:58.252 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 13:32:58 vm00.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-3[116469]: 2026-03-10T13:32:58.060+0000 7f847d625640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.3 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-10T13:32:58.252 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 13:32:58 vm00.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-3[116469]: 2026-03-10T13:32:58.060+0000 7f847d625640 -1 osd.3 140 *** Got signal Terminated *** 2026-03-10T13:32:58.252 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 13:32:58 vm00.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-3[116469]: 2026-03-10T13:32:58.060+0000 7f847d625640 -1 osd.3 140 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-10T13:33:03.354 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 13:33:03 vm00.local podman[129753]: 2026-03-10 13:33:03.082349468 +0000 UTC m=+5.040899899 container died 8739c77cf14d740cb93d4969a99c77f8d2219be203a0dcd995a3eb9d9c66fb34 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-3, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, 
CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_REF=squid) 2026-03-10T13:33:03.354 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 13:33:03 vm00.local podman[129753]: 2026-03-10 13:33:03.110797531 +0000 UTC m=+5.069347962 container remove 8739c77cf14d740cb93d4969a99c77f8d2219be203a0dcd995a3eb9d9c66fb34 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-3, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-10T13:33:03.354 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 13:33:03 vm00.local bash[129753]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-3 2026-03-10T13:33:03.649 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 13:33:03 vm00.local podman[129820]: 2026-03-10 13:33:03.254545506 +0000 UTC m=+0.011650100 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:33:03.649 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 13:33:03 vm00.local podman[129820]: 2026-03-10 13:33:03.453525743 +0000 UTC m=+0.210630336 container create 888a8e8381d28872e48c0ef30d26b61202829cc710f49affcbb6304b2c0eee01 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-3-deactivate, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.build-date=20260223) 2026-03-10T13:33:03.649 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 13:33:03 vm00.local podman[129820]: 2026-03-10 13:33:03.591525878 +0000 UTC m=+0.348630471 container init 888a8e8381d28872e48c0ef30d26b61202829cc710f49affcbb6304b2c0eee01 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-3-deactivate, org.opencontainers.image.authors=Ceph Release Team , io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, OSD_FLAVOR=default, ceph=True, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, 
CEPH_REF=squid) 2026-03-10T13:33:03.649 INFO:journalctl@ceph.osd.3.vm00.stdout:Mar 10 13:33:03 vm00.local podman[129820]: 2026-03-10 13:33:03.594498633 +0000 UTC m=+0.351603216 container start 888a8e8381d28872e48c0ef30d26b61202829cc710f49affcbb6304b2c0eee01 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-3-deactivate, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, ceph=True, org.label-schema.build-date=20260223) 2026-03-10T13:33:03.809 DEBUG:teuthology.orchestra.run.vm00:> sudo pkill -f 'journalctl -f -n 0 -u ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@osd.3.service' 2026-03-10T13:33:03.851 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-10T13:33:03.851 INFO:tasks.cephadm.osd.3:Stopped osd.3 2026-03-10T13:33:03.851 INFO:tasks.cephadm.osd.4:Stopping osd.4... 2026-03-10T13:33:03.852 DEBUG:teuthology.orchestra.run.vm08:> sudo systemctl stop ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@osd.4 2026-03-10T13:33:04.270 INFO:journalctl@ceph.osd.4.vm08.stdout:Mar 10 13:33:03 vm08.local systemd[1]: Stopping Ceph osd.4 for 98a3dada-1c81-11f1-89c9-d57c120f78d5... 2026-03-10T13:33:04.271 INFO:journalctl@ceph.osd.4.vm08.stdout:Mar 10 13:33:03 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-4[90804]: 2026-03-10T13:33:03.978+0000 7f3df4c17640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.4 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-10T13:33:04.271 INFO:journalctl@ceph.osd.4.vm08.stdout:Mar 10 13:33:03 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-4[90804]: 2026-03-10T13:33:03.978+0000 7f3df4c17640 -1 osd.4 140 *** Got signal Terminated *** 2026-03-10T13:33:04.271 INFO:journalctl@ceph.osd.4.vm08.stdout:Mar 10 13:33:03 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-4[90804]: 2026-03-10T13:33:03.978+0000 7f3df4c17640 -1 osd.4 140 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-10T13:33:07.503 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:33:07 vm00.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:33:07.022Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:33:07.503 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:33:07 vm00.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:33:07.023Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup 
host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:33:08.521 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:33:08 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-6[98722]: 2026-03-10T13:33:08.089+0000 7f60f61c3640 -1 osd.6 140 heartbeat_check: no reply from 192.168.123.100:6806 osd.0 since back 2026-03-10T13:32:43.685678+0000 front 2026-03-10T13:32:43.685669+0000 (oldest deadline 2026-03-10T13:33:07.785272+0000) 2026-03-10T13:33:09.262 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:33:09 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-6[98722]: 2026-03-10T13:33:09.097+0000 7f60f61c3640 -1 osd.6 140 heartbeat_check: no reply from 192.168.123.100:6806 osd.0 since back 2026-03-10T13:32:43.685678+0000 front 2026-03-10T13:32:43.685669+0000 (oldest deadline 2026-03-10T13:33:07.785272+0000) 2026-03-10T13:33:09.263 INFO:journalctl@ceph.osd.4.vm08.stdout:Mar 10 13:33:09 vm08.local podman[110395]: 2026-03-10 13:33:09.010765966 +0000 UTC m=+5.060840552 container died 5b92674798b7c7880e9cc0b97f4dcb5ee20701cae3f8b0c51de22a9e7918ec38 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-4, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, io.buildah.version=1.41.3, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, ceph=True, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS) 2026-03-10T13:33:09.263 INFO:journalctl@ceph.osd.4.vm08.stdout:Mar 10 13:33:09 vm08.local podman[110395]: 2026-03-10 13:33:09.037187737 +0000 UTC m=+5.087262313 container remove 5b92674798b7c7880e9cc0b97f4dcb5ee20701cae3f8b0c51de22a9e7918ec38 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-4, org.label-schema.vendor=CentOS, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, FROM_IMAGE=quay.io/centos/centos:stream9) 2026-03-10T13:33:09.263 INFO:journalctl@ceph.osd.4.vm08.stdout:Mar 10 13:33:09 vm08.local bash[110395]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-4 2026-03-10T13:33:09.263 INFO:journalctl@ceph.osd.4.vm08.stdout:Mar 10 13:33:09 vm08.local podman[110473]: 2026-03-10 13:33:09.216928406 +0000 UTC m=+0.020064431 container create 270ba9569d567ebd38a37babab50b218938eed4eb08a8386576ffed40d95b69b (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-4-deactivate, org.opencontainers.image.authors=Ceph Release Team , ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, 
org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20260223, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid) 2026-03-10T13:33:09.263 INFO:journalctl@ceph.osd.4.vm08.stdout:Mar 10 13:33:09 vm08.local podman[110473]: 2026-03-10 13:33:09.253238076 +0000 UTC m=+0.056374090 container init 270ba9569d567ebd38a37babab50b218938eed4eb08a8386576ffed40d95b69b (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-4-deactivate, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, io.buildah.version=1.41.3, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/) 2026-03-10T13:33:09.263 INFO:journalctl@ceph.osd.4.vm08.stdout:Mar 10 13:33:09 vm08.local podman[110473]: 2026-03-10 13:33:09.260021845 +0000 UTC m=+0.063157870 container start 270ba9569d567ebd38a37babab50b218938eed4eb08a8386576ffed40d95b69b (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-4-deactivate, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/) 2026-03-10T13:33:09.427 DEBUG:teuthology.orchestra.run.vm08:> sudo pkill -f 'journalctl -f -n 0 -u ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@osd.4.service' 2026-03-10T13:33:09.465 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-10T13:33:09.465 INFO:tasks.cephadm.osd.4:Stopped osd.4 2026-03-10T13:33:09.466 INFO:tasks.cephadm.osd.5:Stopping osd.5... 2026-03-10T13:33:09.466 DEBUG:teuthology.orchestra.run.vm08:> sudo systemctl stop ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@osd.5 2026-03-10T13:33:10.020 INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 10 13:33:09 vm08.local systemd[1]: Stopping Ceph osd.5 for 98a3dada-1c81-11f1-89c9-d57c120f78d5... 
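[annotation] Throughout this teardown the alertmanager container keeps logging the same dispatcher error: the ceph-dashboard webhook receiver points at https://host.containers.internal:8443/api/prometheus_receiver, but the error shows the lookup falling through to DNS at 192.168.123.1:53 and failing, i.e. no hosts entry for the name is visible to the container, so every notification attempt is retried and eventually canceled. A quick diagnostic sketch (container name copied from the log records above; not part of the job itself):

    # Sketch: check whether host.containers.internal is resolvable from the
    # alertmanager container's /etc/hosts.
    sudo podman exec ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a \
        cat /etc/hosts | grep -i host.containers.internal || echo "no hosts entry"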
2026-03-10T13:33:10.020 INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 10 13:33:09 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-5[94578]: 2026-03-10T13:33:09.618+0000 7fcdf10a2640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.5 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-10T13:33:10.020 INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 10 13:33:09 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-5[94578]: 2026-03-10T13:33:09.618+0000 7fcdf10a2640 -1 osd.5 140 *** Got signal Terminated *** 2026-03-10T13:33:10.020 INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 10 13:33:09 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-5[94578]: 2026-03-10T13:33:09.618+0000 7fcdf10a2640 -1 osd.5 140 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-10T13:33:10.520 INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 10 13:33:10 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-5[94578]: 2026-03-10T13:33:10.029+0000 7fcded6bb640 -1 osd.5 140 heartbeat_check: no reply from 192.168.123.100:6806 osd.0 since back 2026-03-10T13:32:44.486650+0000 front 2026-03-10T13:32:44.486380+0000 (oldest deadline 2026-03-10T13:33:09.786097+0000) 2026-03-10T13:33:10.520 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:33:10 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-6[98722]: 2026-03-10T13:33:10.095+0000 7f60f61c3640 -1 osd.6 140 heartbeat_check: no reply from 192.168.123.100:6806 osd.0 since back 2026-03-10T13:32:43.685678+0000 front 2026-03-10T13:32:43.685669+0000 (oldest deadline 2026-03-10T13:33:07.785272+0000) 2026-03-10T13:33:11.520 INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 10 13:33:11 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-5[94578]: 2026-03-10T13:33:11.064+0000 7fcded6bb640 -1 osd.5 140 heartbeat_check: no reply from 192.168.123.100:6806 osd.0 since back 2026-03-10T13:32:44.486650+0000 front 2026-03-10T13:32:44.486380+0000 (oldest deadline 2026-03-10T13:33:09.786097+0000) 2026-03-10T13:33:11.520 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:33:11 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-6[98722]: 2026-03-10T13:33:11.058+0000 7f60f61c3640 -1 osd.6 140 heartbeat_check: no reply from 192.168.123.100:6806 osd.0 since back 2026-03-10T13:32:43.685678+0000 front 2026-03-10T13:32:43.685669+0000 (oldest deadline 2026-03-10T13:33:07.785272+0000) 2026-03-10T13:33:12.020 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:33:11 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7[102836]: 2026-03-10T13:33:11.538+0000 7f8c9c3b7640 -1 osd.7 140 heartbeat_check: no reply from 192.168.123.100:6806 osd.0 since back 2026-03-10T13:32:45.249538+0000 front 2026-03-10T13:32:45.249508+0000 (oldest deadline 2026-03-10T13:33:10.549212+0000) 2026-03-10T13:33:12.520 INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 10 13:33:12 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-5[94578]: 2026-03-10T13:33:12.072+0000 7fcded6bb640 -1 osd.5 140 heartbeat_check: no reply from 192.168.123.100:6806 osd.0 since back 2026-03-10T13:32:44.486650+0000 front 2026-03-10T13:32:44.486380+0000 (oldest deadline 2026-03-10T13:33:09.786097+0000) 2026-03-10T13:33:12.520 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:33:12 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-6[98722]: 2026-03-10T13:33:12.069+0000 7f60f61c3640 -1 osd.6 140 heartbeat_check: no reply from 192.168.123.100:6806 osd.0 since back 2026-03-10T13:32:43.685678+0000 front 
2026-03-10T13:32:43.685669+0000 (oldest deadline 2026-03-10T13:33:07.785272+0000) 2026-03-10T13:33:13.020 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:33:12 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7[102836]: 2026-03-10T13:33:12.584+0000 7f8c9c3b7640 -1 osd.7 140 heartbeat_check: no reply from 192.168.123.100:6806 osd.0 since back 2026-03-10T13:32:45.249538+0000 front 2026-03-10T13:32:45.249508+0000 (oldest deadline 2026-03-10T13:33:10.549212+0000) 2026-03-10T13:33:13.520 INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 10 13:33:13 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-5[94578]: 2026-03-10T13:33:13.040+0000 7fcded6bb640 -1 osd.5 140 heartbeat_check: no reply from 192.168.123.100:6806 osd.0 since back 2026-03-10T13:32:44.486650+0000 front 2026-03-10T13:32:44.486380+0000 (oldest deadline 2026-03-10T13:33:09.786097+0000) 2026-03-10T13:33:13.520 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:33:13 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-6[98722]: 2026-03-10T13:33:13.074+0000 7f60f61c3640 -1 osd.6 140 heartbeat_check: no reply from 192.168.123.100:6806 osd.0 since back 2026-03-10T13:32:43.685678+0000 front 2026-03-10T13:32:43.685669+0000 (oldest deadline 2026-03-10T13:33:07.785272+0000) 2026-03-10T13:33:14.020 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:33:13 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7[102836]: 2026-03-10T13:33:13.562+0000 7f8c9c3b7640 -1 osd.7 140 heartbeat_check: no reply from 192.168.123.100:6806 osd.0 since back 2026-03-10T13:32:45.249538+0000 front 2026-03-10T13:32:45.249508+0000 (oldest deadline 2026-03-10T13:33:10.549212+0000) 2026-03-10T13:33:14.520 INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 10 13:33:14 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-5[94578]: 2026-03-10T13:33:14.029+0000 7fcded6bb640 -1 osd.5 140 heartbeat_check: no reply from 192.168.123.100:6806 osd.0 since back 2026-03-10T13:32:44.486650+0000 front 2026-03-10T13:32:44.486380+0000 (oldest deadline 2026-03-10T13:33:09.786097+0000) 2026-03-10T13:33:14.520 INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 10 13:33:14 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-5[94578]: 2026-03-10T13:33:14.029+0000 7fcded6bb640 -1 osd.5 140 heartbeat_check: no reply from 192.168.123.100:6814 osd.1 since back 2026-03-10T13:32:49.786743+0000 front 2026-03-10T13:32:49.786718+0000 (oldest deadline 2026-03-10T13:33:13.286385+0000) 2026-03-10T13:33:14.520 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:33:14 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-6[98722]: 2026-03-10T13:33:14.042+0000 7f60f61c3640 -1 osd.6 140 heartbeat_check: no reply from 192.168.123.100:6806 osd.0 since back 2026-03-10T13:32:43.685678+0000 front 2026-03-10T13:32:43.685669+0000 (oldest deadline 2026-03-10T13:33:07.785272+0000) 2026-03-10T13:33:14.520 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:33:14 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-6[98722]: 2026-03-10T13:33:14.042+0000 7f60f61c3640 -1 osd.6 140 heartbeat_check: no reply from 192.168.123.100:6814 osd.1 since back 2026-03-10T13:32:47.785708+0000 front 2026-03-10T13:32:47.785713+0000 (oldest deadline 2026-03-10T13:33:13.685501+0000) 2026-03-10T13:33:14.845 INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 10 13:33:14 vm08.local podman[110569]: 2026-03-10 13:33:14.646368533 +0000 UTC m=+5.042217991 container died e197b6bd6561a9ff689e393f799509056386b4773f7e123f07d0df412c8b836b (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, 
name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-5, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, CEPH_REF=squid, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2) 2026-03-10T13:33:14.845 INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 10 13:33:14 vm08.local podman[110569]: 2026-03-10 13:33:14.664703765 +0000 UTC m=+5.060553223 container remove e197b6bd6561a9ff689e393f799509056386b4773f7e123f07d0df412c8b836b (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-5, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, ceph=True) 2026-03-10T13:33:14.845 INFO:journalctl@ceph.osd.5.vm08.stdout:Mar 10 13:33:14 vm08.local bash[110569]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-5 2026-03-10T13:33:14.845 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:33:14 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7[102836]: 2026-03-10T13:33:14.555+0000 7f8c9c3b7640 -1 osd.7 140 heartbeat_check: no reply from 192.168.123.100:6806 osd.0 since back 2026-03-10T13:32:45.249538+0000 front 2026-03-10T13:32:45.249508+0000 (oldest deadline 2026-03-10T13:33:10.549212+0000) 2026-03-10T13:33:15.064 DEBUG:teuthology.orchestra.run.vm08:> sudo pkill -f 'journalctl -f -n 0 -u ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@osd.5.service' 2026-03-10T13:33:15.099 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-10T13:33:15.099 INFO:tasks.cephadm.osd.5:Stopped osd.5 2026-03-10T13:33:15.099 INFO:tasks.cephadm.osd.6:Stopping osd.6... 
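[annotation] As the vm00 OSDs go down one by one, the OSDs still running on vm08 (osd.5, osd.6, osd.7) begin logging heartbeat_check "no reply" lines against osd.0, osd.1 and later osd.2; during an ordered shutdown these are expected noise rather than an independent failure. When scanning a saved copy of this log, the complaints can be summarised per unreachable peer with standard tools (the teuthology.log filename is an assumption):

    # Sketch: count heartbeat_check complaints per unreachable peer.
    grep -o 'heartbeat_check: no reply from [^ ]* osd\.[0-9]*' teuthology.log \
        | awk '{print $NF}' | sort | uniq -c | sort -rn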
2026-03-10T13:33:15.099 DEBUG:teuthology.orchestra.run.vm08:> sudo systemctl stop ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@osd.6 2026-03-10T13:33:15.099 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:33:14 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-6[98722]: 2026-03-10T13:33:14.992+0000 7f60f61c3640 -1 osd.6 140 heartbeat_check: no reply from 192.168.123.100:6806 osd.0 since back 2026-03-10T13:32:43.685678+0000 front 2026-03-10T13:32:43.685669+0000 (oldest deadline 2026-03-10T13:33:07.785272+0000) 2026-03-10T13:33:15.099 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:33:14 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-6[98722]: 2026-03-10T13:33:14.992+0000 7f60f61c3640 -1 osd.6 140 heartbeat_check: no reply from 192.168.123.100:6814 osd.1 since back 2026-03-10T13:32:47.785708+0000 front 2026-03-10T13:32:47.785713+0000 (oldest deadline 2026-03-10T13:33:13.685501+0000) 2026-03-10T13:33:15.521 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:33:15 vm08.local systemd[1]: Stopping Ceph osd.6 for 98a3dada-1c81-11f1-89c9-d57c120f78d5... 2026-03-10T13:33:15.521 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:33:15 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-6[98722]: 2026-03-10T13:33:15.254+0000 7f60fa3ab640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.6 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-10T13:33:15.521 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:33:15 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-6[98722]: 2026-03-10T13:33:15.254+0000 7f60fa3ab640 -1 osd.6 140 *** Got signal Terminated *** 2026-03-10T13:33:15.521 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:33:15 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-6[98722]: 2026-03-10T13:33:15.254+0000 7f60fa3ab640 -1 osd.6 140 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-10T13:33:15.955 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:33:15 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7[102836]: 2026-03-10T13:33:15.596+0000 7f8c9c3b7640 -1 osd.7 140 heartbeat_check: no reply from 192.168.123.100:6806 osd.0 since back 2026-03-10T13:32:45.249538+0000 front 2026-03-10T13:32:45.249508+0000 (oldest deadline 2026-03-10T13:33:10.549212+0000) 2026-03-10T13:33:15.955 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:33:15 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7[102836]: 2026-03-10T13:33:15.596+0000 7f8c9c3b7640 -1 osd.7 140 heartbeat_check: no reply from 192.168.123.100:6814 osd.1 since back 2026-03-10T13:32:51.049850+0000 front 2026-03-10T13:32:51.049912+0000 (oldest deadline 2026-03-10T13:33:15.149677+0000) 2026-03-10T13:33:16.270 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:33:15 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-6[98722]: 2026-03-10T13:33:15.953+0000 7f60f61c3640 -1 osd.6 140 heartbeat_check: no reply from 192.168.123.100:6806 osd.0 since back 2026-03-10T13:32:43.685678+0000 front 2026-03-10T13:32:43.685669+0000 (oldest deadline 2026-03-10T13:33:07.785272+0000) 2026-03-10T13:33:16.270 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:33:15 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-6[98722]: 2026-03-10T13:33:15.953+0000 7f60f61c3640 -1 osd.6 140 heartbeat_check: no reply from 192.168.123.100:6814 osd.1 since back 2026-03-10T13:32:47.785708+0000 front 2026-03-10T13:32:47.785713+0000 (oldest deadline 2026-03-10T13:33:13.685501+0000) 
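[annotation] Each stopped OSD is followed by a short-lived "<name>-deactivate" container pulled from the same image (the create/init/start/attach records above). This appears to be the unit's post-stop step that deactivates the OSD's storage before the container tree is torn down; on a standard cephadm install the exact command can be read from the daemon's poststop file (path layout assumed, adjust fsid/daemon as needed):

    # Sketch: show what the OSD unit runs after stop.
    fsid=98a3dada-1c81-11f1-89c9-d57c120f78d5
    sudo cat "/var/lib/ceph/${fsid}/osd.6/unit.poststop"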
2026-03-10T13:33:17.000 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:33:16 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7[102836]: 2026-03-10T13:33:16.606+0000 7f8c9c3b7640 -1 osd.7 140 heartbeat_check: no reply from 192.168.123.100:6806 osd.0 since back 2026-03-10T13:32:45.249538+0000 front 2026-03-10T13:32:45.249508+0000 (oldest deadline 2026-03-10T13:33:10.549212+0000) 2026-03-10T13:33:17.000 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:33:16 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7[102836]: 2026-03-10T13:33:16.606+0000 7f8c9c3b7640 -1 osd.7 140 heartbeat_check: no reply from 192.168.123.100:6814 osd.1 since back 2026-03-10T13:32:51.049850+0000 front 2026-03-10T13:32:51.049912+0000 (oldest deadline 2026-03-10T13:33:15.149677+0000) 2026-03-10T13:33:17.270 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:33:16 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-6[98722]: 2026-03-10T13:33:16.998+0000 7f60f61c3640 -1 osd.6 140 heartbeat_check: no reply from 192.168.123.100:6806 osd.0 since back 2026-03-10T13:32:43.685678+0000 front 2026-03-10T13:32:43.685669+0000 (oldest deadline 2026-03-10T13:33:07.785272+0000) 2026-03-10T13:33:17.270 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:33:17 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-6[98722]: 2026-03-10T13:33:16.998+0000 7f60f61c3640 -1 osd.6 140 heartbeat_check: no reply from 192.168.123.100:6814 osd.1 since back 2026-03-10T13:32:47.785708+0000 front 2026-03-10T13:32:47.785713+0000 (oldest deadline 2026-03-10T13:33:13.685501+0000) 2026-03-10T13:33:17.287 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:33:17 vm00.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:33:17.023Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:33:17.287 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:33:17 vm00.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:33:17.024Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:33:17.951 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:33:17 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7[102836]: 2026-03-10T13:33:17.629+0000 7f8c9c3b7640 -1 osd.7 140 heartbeat_check: no reply from 192.168.123.100:6806 osd.0 since back 2026-03-10T13:32:45.249538+0000 front 2026-03-10T13:32:45.249508+0000 (oldest deadline 2026-03-10T13:33:10.549212+0000) 2026-03-10T13:33:17.951 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:33:17 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7[102836]: 2026-03-10T13:33:17.629+0000 7f8c9c3b7640 -1 osd.7 140 heartbeat_check: no reply from 192.168.123.100:6814 osd.1 since back 2026-03-10T13:32:51.049850+0000 front 2026-03-10T13:32:51.049912+0000 (oldest deadline 2026-03-10T13:33:15.149677+0000) 2026-03-10T13:33:18.270 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:33:17 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-6[98722]: 2026-03-10T13:33:17.949+0000 7f60f61c3640 -1 
osd.6 140 heartbeat_check: no reply from 192.168.123.100:6806 osd.0 since back 2026-03-10T13:32:43.685678+0000 front 2026-03-10T13:32:43.685669+0000 (oldest deadline 2026-03-10T13:33:07.785272+0000) 2026-03-10T13:33:18.270 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:33:17 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-6[98722]: 2026-03-10T13:33:17.949+0000 7f60f61c3640 -1 osd.6 140 heartbeat_check: no reply from 192.168.123.100:6814 osd.1 since back 2026-03-10T13:32:47.785708+0000 front 2026-03-10T13:32:47.785713+0000 (oldest deadline 2026-03-10T13:33:13.685501+0000) 2026-03-10T13:33:18.979 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:33:18 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7[102836]: 2026-03-10T13:33:18.650+0000 7f8c9c3b7640 -1 osd.7 140 heartbeat_check: no reply from 192.168.123.100:6806 osd.0 since back 2026-03-10T13:32:45.249538+0000 front 2026-03-10T13:32:45.249508+0000 (oldest deadline 2026-03-10T13:33:10.549212+0000) 2026-03-10T13:33:18.979 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:33:18 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7[102836]: 2026-03-10T13:33:18.650+0000 7f8c9c3b7640 -1 osd.7 140 heartbeat_check: no reply from 192.168.123.100:6814 osd.1 since back 2026-03-10T13:32:51.049850+0000 front 2026-03-10T13:32:51.049912+0000 (oldest deadline 2026-03-10T13:33:15.149677+0000) 2026-03-10T13:33:19.270 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:33:18 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-6[98722]: 2026-03-10T13:33:18.977+0000 7f60f61c3640 -1 osd.6 140 heartbeat_check: no reply from 192.168.123.100:6806 osd.0 since back 2026-03-10T13:32:43.685678+0000 front 2026-03-10T13:32:43.685669+0000 (oldest deadline 2026-03-10T13:33:07.785272+0000) 2026-03-10T13:33:19.270 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:33:18 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-6[98722]: 2026-03-10T13:33:18.977+0000 7f60f61c3640 -1 osd.6 140 heartbeat_check: no reply from 192.168.123.100:6814 osd.1 since back 2026-03-10T13:32:47.785708+0000 front 2026-03-10T13:32:47.785713+0000 (oldest deadline 2026-03-10T13:33:13.685501+0000) 2026-03-10T13:33:20.020 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:33:19 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7[102836]: 2026-03-10T13:33:19.693+0000 7f8c9c3b7640 -1 osd.7 140 heartbeat_check: no reply from 192.168.123.100:6806 osd.0 since back 2026-03-10T13:32:45.249538+0000 front 2026-03-10T13:32:45.249508+0000 (oldest deadline 2026-03-10T13:33:10.549212+0000) 2026-03-10T13:33:20.020 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:33:19 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7[102836]: 2026-03-10T13:33:19.693+0000 7f8c9c3b7640 -1 osd.7 140 heartbeat_check: no reply from 192.168.123.100:6814 osd.1 since back 2026-03-10T13:32:51.049850+0000 front 2026-03-10T13:32:51.049912+0000 (oldest deadline 2026-03-10T13:33:15.149677+0000) 2026-03-10T13:33:20.296 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:33:20 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-6[98722]: 2026-03-10T13:33:20.024+0000 7f60f61c3640 -1 osd.6 140 heartbeat_check: no reply from 192.168.123.100:6806 osd.0 since back 2026-03-10T13:32:43.685678+0000 front 2026-03-10T13:32:43.685669+0000 (oldest deadline 2026-03-10T13:33:07.785272+0000) 2026-03-10T13:33:20.296 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:33:20 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-6[98722]: 2026-03-10T13:33:20.024+0000 7f60f61c3640 -1 osd.6 140 heartbeat_check: no reply from 
192.168.123.100:6814 osd.1 since back 2026-03-10T13:32:47.785708+0000 front 2026-03-10T13:32:47.785713+0000 (oldest deadline 2026-03-10T13:33:13.685501+0000) 2026-03-10T13:33:20.296 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:33:20 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-6[98722]: 2026-03-10T13:33:20.024+0000 7f60f61c3640 -1 osd.6 140 heartbeat_check: no reply from 192.168.123.100:6822 osd.2 since back 2026-03-10T13:32:53.686077+0000 front 2026-03-10T13:32:53.686066+0000 (oldest deadline 2026-03-10T13:33:18.985776+0000) 2026-03-10T13:33:20.565 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:33:20 vm08.local podman[110734]: 2026-03-10 13:33:20.295462593 +0000 UTC m=+5.055806205 container died 529ca6d92bc210f750ab59898d4dbafcf91309c2f3454a27cf34d159d127696e (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-6, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, OSD_FLAVOR=default, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df) 2026-03-10T13:33:20.565 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:33:20 vm08.local podman[110734]: 2026-03-10 13:33:20.319757383 +0000 UTC m=+5.080100995 container remove 529ca6d92bc210f750ab59898d4dbafcf91309c2f3454a27cf34d159d127696e (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-6, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.authors=Ceph Release Team ) 2026-03-10T13:33:20.565 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:33:20 vm08.local bash[110734]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-6 2026-03-10T13:33:20.565 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:33:20 vm08.local podman[110802]: 2026-03-10 13:33:20.470720131 +0000 UTC m=+0.018677005 container create ba72a6b2deec1297ba1caec5de0ec2ae8bb49bc900fd55ee196e626f087d10dd (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-6-deactivate, OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.name=CentOS 
Stream 9 Base Image, CEPH_REF=squid, org.label-schema.build-date=20260223, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0) 2026-03-10T13:33:20.565 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:33:20 vm08.local podman[110802]: 2026-03-10 13:33:20.509265556 +0000 UTC m=+0.057222430 container init ba72a6b2deec1297ba1caec5de0ec2ae8bb49bc900fd55ee196e626f087d10dd (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-6-deactivate, org.label-schema.license=GPLv2, CEPH_REF=squid, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-10T13:33:20.565 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:33:20 vm08.local podman[110802]: 2026-03-10 13:33:20.512269149 +0000 UTC m=+0.060226013 container start ba72a6b2deec1297ba1caec5de0ec2ae8bb49bc900fd55ee196e626f087d10dd (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-6-deactivate, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid, org.label-schema.build-date=20260223, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/) 2026-03-10T13:33:20.565 INFO:journalctl@ceph.osd.6.vm08.stdout:Mar 10 13:33:20 vm08.local podman[110802]: 2026-03-10 13:33:20.515420218 +0000 UTC m=+0.063377092 container attach ba72a6b2deec1297ba1caec5de0ec2ae8bb49bc900fd55ee196e626f087d10dd (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-6-deactivate, org.label-schema.schema-version=1.0, ceph=True, OSD_FLAVOR=default, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.license=GPLv2) 2026-03-10T13:33:20.697 DEBUG:teuthology.orchestra.run.vm08:> sudo pkill -f 'journalctl -f -n 0 -u ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@osd.6.service' 2026-03-10T13:33:20.735 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-10T13:33:20.735 INFO:tasks.cephadm.osd.6:Stopped osd.6 2026-03-10T13:33:20.735 
INFO:tasks.cephadm.osd.7:Stopping osd.7... 2026-03-10T13:33:20.735 DEBUG:teuthology.orchestra.run.vm08:> sudo systemctl stop ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@osd.7 2026-03-10T13:33:20.885 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:33:20 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7[102836]: 2026-03-10T13:33:20.659+0000 7f8c9c3b7640 -1 osd.7 140 heartbeat_check: no reply from 192.168.123.100:6806 osd.0 since back 2026-03-10T13:32:45.249538+0000 front 2026-03-10T13:32:45.249508+0000 (oldest deadline 2026-03-10T13:33:10.549212+0000) 2026-03-10T13:33:20.885 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:33:20 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7[102836]: 2026-03-10T13:33:20.659+0000 7f8c9c3b7640 -1 osd.7 140 heartbeat_check: no reply from 192.168.123.100:6814 osd.1 since back 2026-03-10T13:32:51.049850+0000 front 2026-03-10T13:32:51.049912+0000 (oldest deadline 2026-03-10T13:33:15.149677+0000) 2026-03-10T13:33:20.885 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:33:20 vm08.local systemd[1]: Stopping Ceph osd.7 for 98a3dada-1c81-11f1-89c9-d57c120f78d5... 2026-03-10T13:33:21.270 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:33:20 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7[102836]: 2026-03-10T13:33:20.883+0000 7f8c9fd9e640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.7 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-10T13:33:21.270 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:33:20 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7[102836]: 2026-03-10T13:33:20.883+0000 7f8c9fd9e640 -1 osd.7 140 *** Got signal Terminated *** 2026-03-10T13:33:21.270 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:33:20 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7[102836]: 2026-03-10T13:33:20.883+0000 7f8c9fd9e640 -1 osd.7 140 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-10T13:33:22.020 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:33:21 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7[102836]: 2026-03-10T13:33:21.625+0000 7f8c9c3b7640 -1 osd.7 140 heartbeat_check: no reply from 192.168.123.100:6806 osd.0 since back 2026-03-10T13:32:45.249538+0000 front 2026-03-10T13:32:45.249508+0000 (oldest deadline 2026-03-10T13:33:10.549212+0000) 2026-03-10T13:33:22.020 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:33:21 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7[102836]: 2026-03-10T13:33:21.625+0000 7f8c9c3b7640 -1 osd.7 140 heartbeat_check: no reply from 192.168.123.100:6814 osd.1 since back 2026-03-10T13:32:51.049850+0000 front 2026-03-10T13:32:51.049912+0000 (oldest deadline 2026-03-10T13:33:15.149677+0000) 2026-03-10T13:33:22.020 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:33:21 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7[102836]: 2026-03-10T13:33:21.625+0000 7f8c9c3b7640 -1 osd.7 140 heartbeat_check: no reply from 192.168.123.100:6822 osd.2 since back 2026-03-10T13:32:55.150038+0000 front 2026-03-10T13:32:55.150192+0000 (oldest deadline 2026-03-10T13:33:21.049944+0000) 2026-03-10T13:33:23.020 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:33:22 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7[102836]: 2026-03-10T13:33:22.620+0000 7f8c9c3b7640 -1 osd.7 140 heartbeat_check: no reply from 192.168.123.100:6806 osd.0 since back 2026-03-10T13:32:45.249538+0000 front 2026-03-10T13:32:45.249508+0000 (oldest deadline 
2026-03-10T13:33:10.549212+0000) 2026-03-10T13:33:23.020 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:33:22 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7[102836]: 2026-03-10T13:33:22.620+0000 7f8c9c3b7640 -1 osd.7 140 heartbeat_check: no reply from 192.168.123.100:6814 osd.1 since back 2026-03-10T13:32:51.049850+0000 front 2026-03-10T13:32:51.049912+0000 (oldest deadline 2026-03-10T13:33:15.149677+0000) 2026-03-10T13:33:23.020 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:33:22 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7[102836]: 2026-03-10T13:33:22.620+0000 7f8c9c3b7640 -1 osd.7 140 heartbeat_check: no reply from 192.168.123.100:6822 osd.2 since back 2026-03-10T13:32:55.150038+0000 front 2026-03-10T13:32:55.150192+0000 (oldest deadline 2026-03-10T13:33:21.049944+0000) 2026-03-10T13:33:24.020 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:33:23 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7[102836]: 2026-03-10T13:33:23.656+0000 7f8c9c3b7640 -1 osd.7 140 heartbeat_check: no reply from 192.168.123.100:6806 osd.0 since back 2026-03-10T13:32:45.249538+0000 front 2026-03-10T13:32:45.249508+0000 (oldest deadline 2026-03-10T13:33:10.549212+0000) 2026-03-10T13:33:24.020 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:33:23 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7[102836]: 2026-03-10T13:33:23.656+0000 7f8c9c3b7640 -1 osd.7 140 heartbeat_check: no reply from 192.168.123.100:6814 osd.1 since back 2026-03-10T13:32:51.049850+0000 front 2026-03-10T13:32:51.049912+0000 (oldest deadline 2026-03-10T13:33:15.149677+0000) 2026-03-10T13:33:24.020 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:33:23 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7[102836]: 2026-03-10T13:33:23.656+0000 7f8c9c3b7640 -1 osd.7 140 heartbeat_check: no reply from 192.168.123.100:6822 osd.2 since back 2026-03-10T13:32:55.150038+0000 front 2026-03-10T13:32:55.150192+0000 (oldest deadline 2026-03-10T13:33:21.049944+0000) 2026-03-10T13:33:25.020 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:33:24 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7[102836]: 2026-03-10T13:33:24.608+0000 7f8c9c3b7640 -1 osd.7 140 heartbeat_check: no reply from 192.168.123.100:6806 osd.0 since back 2026-03-10T13:32:45.249538+0000 front 2026-03-10T13:32:45.249508+0000 (oldest deadline 2026-03-10T13:33:10.549212+0000) 2026-03-10T13:33:25.020 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:33:24 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7[102836]: 2026-03-10T13:33:24.608+0000 7f8c9c3b7640 -1 osd.7 140 heartbeat_check: no reply from 192.168.123.100:6814 osd.1 since back 2026-03-10T13:32:51.049850+0000 front 2026-03-10T13:32:51.049912+0000 (oldest deadline 2026-03-10T13:33:15.149677+0000) 2026-03-10T13:33:25.020 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:33:24 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7[102836]: 2026-03-10T13:33:24.608+0000 7f8c9c3b7640 -1 osd.7 140 heartbeat_check: no reply from 192.168.123.100:6822 osd.2 since back 2026-03-10T13:32:55.150038+0000 front 2026-03-10T13:32:55.150192+0000 (oldest deadline 2026-03-10T13:33:21.049944+0000) 2026-03-10T13:33:25.020 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:33:24 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7[102836]: 2026-03-10T13:33:24.608+0000 7f8c9c3b7640 -1 osd.7 140 heartbeat_check: no reply from 192.168.123.100:6830 osd.3 since back 2026-03-10T13:33:01.050330+0000 front 2026-03-10T13:33:01.050410+0000 (oldest deadline 2026-03-10T13:33:23.950219+0000) 
2026-03-10T13:33:25.906 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:33:25 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7[102836]: 2026-03-10T13:33:25.604+0000 7f8c9c3b7640 -1 osd.7 140 heartbeat_check: no reply from 192.168.123.100:6806 osd.0 since back 2026-03-10T13:32:45.249538+0000 front 2026-03-10T13:32:45.249508+0000 (oldest deadline 2026-03-10T13:33:10.549212+0000) 2026-03-10T13:33:25.906 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:33:25 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7[102836]: 2026-03-10T13:33:25.604+0000 7f8c9c3b7640 -1 osd.7 140 heartbeat_check: no reply from 192.168.123.100:6814 osd.1 since back 2026-03-10T13:32:51.049850+0000 front 2026-03-10T13:32:51.049912+0000 (oldest deadline 2026-03-10T13:33:15.149677+0000) 2026-03-10T13:33:25.906 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:33:25 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7[102836]: 2026-03-10T13:33:25.604+0000 7f8c9c3b7640 -1 osd.7 140 heartbeat_check: no reply from 192.168.123.100:6822 osd.2 since back 2026-03-10T13:32:55.150038+0000 front 2026-03-10T13:32:55.150192+0000 (oldest deadline 2026-03-10T13:33:21.049944+0000) 2026-03-10T13:33:25.906 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:33:25 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7[102836]: 2026-03-10T13:33:25.604+0000 7f8c9c3b7640 -1 osd.7 140 heartbeat_check: no reply from 192.168.123.100:6830 osd.3 since back 2026-03-10T13:33:01.050330+0000 front 2026-03-10T13:33:01.050410+0000 (oldest deadline 2026-03-10T13:33:23.950219+0000) 2026-03-10T13:33:26.169 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:33:25 vm08.local podman[110900]: 2026-03-10 13:33:25.905172041 +0000 UTC m=+5.035789849 container died f59d4e9eed6e76b7c2de72f50a582c21028230834b9482c45e8260521c8e90a3 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2) 2026-03-10T13:33:26.169 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:33:25 vm08.local podman[110900]: 2026-03-10 13:33:25.920740755 +0000 UTC m=+5.051358552 container remove f59d4e9eed6e76b7c2de72f50a582c21028230834b9482c45e8260521c8e90a3 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, 
io.buildah.version=1.41.3) 2026-03-10T13:33:26.169 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:33:25 vm08.local bash[110900]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7 2026-03-10T13:33:26.169 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:33:26 vm08.local podman[110968]: 2026-03-10 13:33:26.07472037 +0000 UTC m=+0.017231127 container create 949b5277dff0d255c1eeceea0d15aee995ce58e7e78c910604db0abc1d705161 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7-deactivate, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS, ceph=True, org.opencontainers.image.authors=Ceph Release Team , io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, CEPH_REF=squid, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9) 2026-03-10T13:33:26.169 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:33:26 vm08.local podman[110968]: 2026-03-10 13:33:26.11670269 +0000 UTC m=+0.059213447 container init 949b5277dff0d255c1eeceea0d15aee995ce58e7e78c910604db0abc1d705161 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7-deactivate, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, CEPH_REF=squid, OSD_FLAVOR=default, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-10T13:33:26.169 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:33:26 vm08.local podman[110968]: 2026-03-10 13:33:26.119916987 +0000 UTC m=+0.062427744 container start 949b5277dff0d255c1eeceea0d15aee995ce58e7e78c910604db0abc1d705161 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7-deactivate, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.label-schema.build-date=20260223, CEPH_REF=squid, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-10T13:33:26.169 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:33:26 vm08.local podman[110968]: 2026-03-10 13:33:26.12336435 +0000 UTC m=+0.065875107 container attach 949b5277dff0d255c1eeceea0d15aee995ce58e7e78c910604db0abc1d705161 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, 
name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-osd-7-deactivate, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , io.buildah.version=1.41.3, CEPH_REF=squid, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0) 2026-03-10T13:33:26.169 INFO:journalctl@ceph.osd.7.vm08.stdout:Mar 10 13:33:26 vm08.local podman[110968]: 2026-03-10 13:33:26.068375032 +0000 UTC m=+0.010885799 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T13:33:26.280 DEBUG:teuthology.orchestra.run.vm08:> sudo pkill -f 'journalctl -f -n 0 -u ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@osd.7.service' 2026-03-10T13:33:26.316 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-10T13:33:26.316 INFO:tasks.cephadm.osd.7:Stopped osd.7 2026-03-10T13:33:26.316 INFO:tasks.cephadm.prometheus.a:Stopping prometheus.a... 2026-03-10T13:33:26.316 DEBUG:teuthology.orchestra.run.vm08:> sudo systemctl stop ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@prometheus.a 2026-03-10T13:33:26.478 INFO:journalctl@ceph.prometheus.a.vm08.stdout:Mar 10 13:33:26 vm08.local systemd[1]: Stopping Ceph prometheus.a for 98a3dada-1c81-11f1-89c9-d57c120f78d5... 2026-03-10T13:33:26.576 DEBUG:teuthology.orchestra.run.vm08:> sudo pkill -f 'journalctl -f -n 0 -u ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@prometheus.a.service' 2026-03-10T13:33:26.611 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-10T13:33:26.611 INFO:tasks.cephadm.prometheus.a:Stopped prometheus.a 2026-03-10T13:33:26.611 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 --force --keep-logs 2026-03-10T13:33:27.252 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:33:27 vm00.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:33:27.024Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:33:27.253 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:33:27 vm00.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:33:27.026Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T13:33:28.347 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:33:28 vm00.local systemd[1]: Stopping Ceph node-exporter.a for 98a3dada-1c81-11f1-89c9-d57c120f78d5... 
2026-03-10T13:33:28.348 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:33:28 vm00.local podman[130248]: 2026-03-10 13:33:28.107077882 +0000 UTC m=+0.026626675 container died bcf8834016191e2ea6ad1604235b1fe114e314c779dc2dd6ccb2b81c4a7bb9d7 (image=quay.io/prometheus/node-exporter:v1.7.0, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a, maintainer=The Prometheus Authors ) 2026-03-10T13:33:28.348 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:33:28 vm00.local podman[130248]: 2026-03-10 13:33:28.123184441 +0000 UTC m=+0.042733234 container remove bcf8834016191e2ea6ad1604235b1fe114e314c779dc2dd6ccb2b81c4a7bb9d7 (image=quay.io/prometheus/node-exporter:v1.7.0, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a, maintainer=The Prometheus Authors ) 2026-03-10T13:33:28.348 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:33:28 vm00.local bash[130248]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-a 2026-03-10T13:33:28.348 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:33:28 vm00.local systemd[1]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@node-exporter.a.service: Main process exited, code=exited, status=143/n/a 2026-03-10T13:33:28.348 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:33:28 vm00.local systemd[1]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@node-exporter.a.service: Failed with result 'exit-code'. 2026-03-10T13:33:28.348 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:33:28 vm00.local systemd[1]: Stopped Ceph node-exporter.a for 98a3dada-1c81-11f1-89c9-d57c120f78d5. 2026-03-10T13:33:28.348 INFO:journalctl@ceph.node-exporter.a.vm00.stdout:Mar 10 13:33:28 vm00.local systemd[1]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@node-exporter.a.service: Consumed 1.629s CPU time. 2026-03-10T13:33:28.348 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:33:28 vm00.local systemd[1]: Stopping Ceph alertmanager.a for 98a3dada-1c81-11f1-89c9-d57c120f78d5... 2026-03-10T13:33:28.673 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:33:28 vm00.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a[78920]: ts=2026-03-10T13:33:28.416Z caller=main.go:583 level=info msg="Received SIGTERM, exiting gracefully..." 
2026-03-10T13:33:28.673 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:33:28 vm00.local podman[130342]: 2026-03-10 13:33:28.427229494 +0000 UTC m=+0.025416428 container died 12fde3cf83cba9d0a6f9479be80ea8d92beb092408b37b172a4c5a573bffc836 (image=quay.io/prometheus/alertmanager:v0.25.0, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a, maintainer=The Prometheus Authors ) 2026-03-10T13:33:28.673 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:33:28 vm00.local podman[130342]: 2026-03-10 13:33:28.441801051 +0000 UTC m=+0.039987985 container remove 12fde3cf83cba9d0a6f9479be80ea8d92beb092408b37b172a4c5a573bffc836 (image=quay.io/prometheus/alertmanager:v0.25.0, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a, maintainer=The Prometheus Authors ) 2026-03-10T13:33:28.673 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:33:28 vm00.local podman[130342]: 2026-03-10 13:33:28.44320512 +0000 UTC m=+0.041392074 volume remove a1974002b31b7238b1363445cdca72ed4a4069cb926b431b6dd700dd9d26e8e0 2026-03-10T13:33:28.673 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:33:28 vm00.local bash[130342]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-alertmanager-a 2026-03-10T13:33:28.673 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:33:28 vm00.local systemd[1]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@alertmanager.a.service: Deactivated successfully. 2026-03-10T13:33:28.673 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:33:28 vm00.local systemd[1]: Stopped Ceph alertmanager.a for 98a3dada-1c81-11f1-89c9-d57c120f78d5. 2026-03-10T13:33:28.673 INFO:journalctl@ceph.alertmanager.a.vm00.stdout:Mar 10 13:33:28 vm00.local systemd[1]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@alertmanager.a.service: Consumed 1.515s CPU time. 2026-03-10T13:33:49.596 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 --force --keep-logs 2026-03-10T13:33:50.840 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:33:50 vm08.local systemd[1]: Stopping Ceph node-exporter.b for 98a3dada-1c81-11f1-89c9-d57c120f78d5... 
2026-03-10T13:33:50.840 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:33:50 vm08.local podman[111432]: 2026-03-10 13:33:50.824704898 +0000 UTC m=+0.038314796 container died 4ac83f03f8180efcc27acff61b1ff929b556230bc7f90fd9f7e2ad964eb064d2 (image=quay.io/prometheus/node-exporter:v1.7.0, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b, maintainer=The Prometheus Authors ) 2026-03-10T13:33:51.170 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:33:50 vm08.local podman[111432]: 2026-03-10 13:33:50.845793124 +0000 UTC m=+0.059403022 container remove 4ac83f03f8180efcc27acff61b1ff929b556230bc7f90fd9f7e2ad964eb064d2 (image=quay.io/prometheus/node-exporter:v1.7.0, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b, maintainer=The Prometheus Authors ) 2026-03-10T13:33:51.170 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:33:50 vm08.local bash[111432]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-node-exporter-b 2026-03-10T13:33:51.170 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:33:50 vm08.local systemd[1]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@node-exporter.b.service: Main process exited, code=exited, status=143/n/a 2026-03-10T13:33:51.170 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:33:50 vm08.local systemd[1]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@node-exporter.b.service: Failed with result 'exit-code'. 2026-03-10T13:33:51.170 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:33:50 vm08.local systemd[1]: Stopped Ceph node-exporter.b for 98a3dada-1c81-11f1-89c9-d57c120f78d5. 2026-03-10T13:33:51.170 INFO:journalctl@ceph.node-exporter.b.vm08.stdout:Mar 10 13:33:50 vm08.local systemd[1]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@node-exporter.b.service: Consumed 1.627s CPU time. 2026-03-10T13:33:51.520 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:33:51 vm08.local systemd[1]: Stopping Ceph grafana.a for 98a3dada-1c81-11f1-89c9-d57c120f78d5... 
2026-03-10T13:33:51.520 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:33:51 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=server t=2026-03-10T13:33:51.356886315Z level=info msg="Shutdown started" reason="System signal: terminated" 2026-03-10T13:33:51.520 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:33:51 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=tracing t=2026-03-10T13:33:51.357519751Z level=info msg="Closing tracing" 2026-03-10T13:33:51.520 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:33:51 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=ticker t=2026-03-10T13:33:51.35774897Z level=info msg=stopped last_tick=2026-03-10T13:33:50Z 2026-03-10T13:33:51.520 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:33:51 vm08.local ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a[81021]: logger=grafana-apiserver t=2026-03-10T13:33:51.357889412Z level=info msg="StorageObjectCountTracker pruner is exiting" 2026-03-10T13:33:51.521 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:33:51 vm08.local podman[111571]: 2026-03-10 13:33:51.366529447 +0000 UTC m=+0.036923430 container died 960e32589e98212c79bf72d38fe81dc1625621590d58979c78e62827d5495306 (image=quay.io/ceph/grafana:10.4.0, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a, maintainer=Grafana Labs ) 2026-03-10T13:33:51.521 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:33:51 vm08.local podman[111571]: 2026-03-10 13:33:51.408471 +0000 UTC m=+0.078864983 container remove 960e32589e98212c79bf72d38fe81dc1625621590d58979c78e62827d5495306 (image=quay.io/ceph/grafana:10.4.0, name=ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a, maintainer=Grafana Labs ) 2026-03-10T13:33:51.521 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:33:51 vm08.local bash[111571]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5-grafana-a 2026-03-10T13:33:51.521 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:33:51 vm08.local systemd[1]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@grafana.a.service: Deactivated successfully. 2026-03-10T13:33:51.521 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:33:51 vm08.local systemd[1]: Stopped Ceph grafana.a for 98a3dada-1c81-11f1-89c9-d57c120f78d5. 2026-03-10T13:33:51.521 INFO:journalctl@ceph.grafana.a.vm08.stdout:Mar 10 13:33:51 vm08.local systemd[1]: ceph-98a3dada-1c81-11f1-89c9-d57c120f78d5@grafana.a.service: Consumed 3.826s CPU time. 2026-03-10T13:34:02.342 DEBUG:teuthology.orchestra.run.vm00:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring 2026-03-10T13:34:02.371 DEBUG:teuthology.orchestra.run.vm08:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring 2026-03-10T13:34:02.400 INFO:tasks.cephadm:Archiving crash dumps... 2026-03-10T13:34:02.400 DEBUG:teuthology.misc:Transferring archived files from vm00:/var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/crash to /archive/kyr-2026-03-10_01:00:38-orch-squid-none-default-vps/1040/remote/vm00/crash 2026-03-10T13:34:02.400 DEBUG:teuthology.orchestra.run.vm00:> sudo tar c -f - -C /var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/crash -- . 
2026-03-10T13:34:02.437 INFO:teuthology.orchestra.run.vm00.stderr:tar: /var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/crash: Cannot open: No such file or directory 2026-03-10T13:34:02.437 INFO:teuthology.orchestra.run.vm00.stderr:tar: Error is not recoverable: exiting now 2026-03-10T13:34:02.438 DEBUG:teuthology.misc:Transferring archived files from vm08:/var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/crash to /archive/kyr-2026-03-10_01:00:38-orch-squid-none-default-vps/1040/remote/vm08/crash 2026-03-10T13:34:02.439 DEBUG:teuthology.orchestra.run.vm08:> sudo tar c -f - -C /var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/crash -- . 2026-03-10T13:34:02.466 INFO:teuthology.orchestra.run.vm08.stderr:tar: /var/lib/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/crash: Cannot open: No such file or directory 2026-03-10T13:34:02.466 INFO:teuthology.orchestra.run.vm08.stderr:tar: Error is not recoverable: exiting now 2026-03-10T13:34:02.468 INFO:tasks.cephadm:Checking cluster log for badness... 2026-03-10T13:34:02.468 DEBUG:teuthology.orchestra.run.vm00:> sudo egrep '\[ERR\]|\[WRN\]|\[SEC\]' /var/log/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/ceph.log | egrep CEPHADM_ | egrep -v '\(MDS_ALL_DOWN\)' | egrep -v '\(MDS_UP_LESS_THAN_MAX\)' | egrep -v CEPHADM_STRAY_DAEMON | egrep -v CEPHADM_FAILED_DAEMON | egrep -v CEPHADM_AGENT_DOWN | head -n 1 2026-03-10T13:34:02.516 INFO:tasks.cephadm:Compressing logs... 2026-03-10T13:34:02.516 DEBUG:teuthology.orchestra.run.vm00:> time sudo find /var/log/ceph /var/log/rbd-target-api -name '*.log' -print0 | sudo xargs --max-args=1 --max-procs=0 --verbose -0 --no-run-if-empty -- gzip -5 --verbose -- 2026-03-10T13:34:02.559 DEBUG:teuthology.orchestra.run.vm08:> time sudo find /var/log/ceph /var/log/rbd-target-api -name '*.log' -print0 | sudo xargs --max-args=1 --max-procs=0 --verbose -0 --no-run-if-empty -- gzip -5 --verbose -- 2026-03-10T13:34:02.582 INFO:teuthology.orchestra.run.vm00.stderr:find: ‘/var/log/rbd-target-api’: No such file or directory 2026-03-10T13:34:02.582 INFO:teuthology.orchestra.run.vm00.stderr:gzip -5 --verbose -- /var/log/ceph/cephadm.log 2026-03-10T13:34:02.582 INFO:teuthology.orchestra.run.vm00.stderr:gzip -5 --verbose -- /var/log/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/ceph-mon.a.log 2026-03-10T13:34:02.583 INFO:teuthology.orchestra.run.vm08.stderr:find: ‘/var/log/rbd-target-api’: No such file or directory 2026-03-10T13:34:02.583 INFO:teuthology.orchestra.run.vm00.stderr:/var/log/ceph/cephadm.log: gzip -5 --verbose -- /var/log/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/ceph.log 2026-03-10T13:34:02.583 INFO:teuthology.orchestra.run.vm08.stderr:gzip -5 --verbose -- /var/log/ceph/cephadm.log 2026-03-10T13:34:02.584 INFO:teuthology.orchestra.run.vm08.stderr:gzip -5 --verbose -- /var/log/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/ceph-volume.log 2026-03-10T13:34:02.584 INFO:teuthology.orchestra.run.vm08.stderr:/var/log/ceph/cephadm.log: gzip -5 --verbose -- /var/log/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/ceph-mon.b.log 2026-03-10T13:34:02.590 INFO:teuthology.orchestra.run.vm00.stderr:/var/log/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/ceph-mon.a.log: gzip -5 --verbose -- /var/log/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/ceph-mgr.y.log 2026-03-10T13:34:02.590 INFO:teuthology.orchestra.run.vm08.stderr:/var/log/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/ceph-volume.log: gzip -5 --verbose -- /var/log/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/ceph.cephadm.log 2026-03-10T13:34:02.592 
INFO:teuthology.orchestra.run.vm08.stderr:/var/log/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/ceph-mon.b.log: 94.1% -- replaced with /var/log/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/ceph-volume.log.gz 2026-03-10T13:34:02.593 INFO:teuthology.orchestra.run.vm00.stderr:/var/log/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/ceph.log: 93.7% 92.1% -- replaced with /var/log/ceph/cephadm.log.gz 2026-03-10T13:34:02.593 INFO:teuthology.orchestra.run.vm00.stderr: -- replaced with /var/log/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/ceph.log.gz 2026-03-10T13:34:02.593 INFO:teuthology.orchestra.run.vm00.stderr:gzip -5 --verbose -- /var/log/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/ceph.audit.log 2026-03-10T13:34:02.596 INFO:teuthology.orchestra.run.vm08.stderr:gzip -5 --verbose -- /var/log/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/ceph.audit.log 2026-03-10T13:34:02.598 INFO:teuthology.orchestra.run.vm08.stderr:/var/log/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/ceph.cephadm.log: 86.4% -- replaced with /var/log/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/ceph.cephadm.log.gz 2026-03-10T13:34:02.599 INFO:teuthology.orchestra.run.vm00.stderr:/var/log/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/ceph-mgr.y.log: gzip -5 --verbose -- /var/log/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/ceph.cephadm.log 2026-03-10T13:34:02.601 INFO:teuthology.orchestra.run.vm08.stderr:gzip -5 --verbose -- /var/log/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/ceph.log 2026-03-10T13:34:02.602 INFO:teuthology.orchestra.run.vm08.stderr:/var/log/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/ceph.audit.log: 91.0% -- replaced with /var/log/ceph/cephadm.log.gz 2026-03-10T13:34:02.603 INFO:teuthology.orchestra.run.vm08.stderr:gzip -5 --verbose -- /var/log/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/ceph-mgr.x.log 2026-03-10T13:34:02.606 INFO:teuthology.orchestra.run.vm08.stderr:/var/log/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/ceph.log: 88.6% -- replaced with /var/log/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/ceph.log.gz 2026-03-10T13:34:02.606 INFO:teuthology.orchestra.run.vm08.stderr:gzip -5 --verbose -- /var/log/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/ceph-osd.4.log 2026-03-10T13:34:02.608 INFO:teuthology.orchestra.run.vm08.stderr:/var/log/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/ceph-mgr.x.log: 91.0% -- replaced with /var/log/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/ceph.audit.log.gz 2026-03-10T13:34:02.609 INFO:teuthology.orchestra.run.vm00.stderr:/var/log/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/ceph.audit.log: gzip -5 --verbose -- /var/log/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/ceph-volume.log 2026-03-10T13:34:02.611 INFO:teuthology.orchestra.run.vm00.stderr:/var/log/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/ceph.cephadm.log: 92.2% 94.4% -- replaced with /var/log/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/ceph.audit.log.gz 2026-03-10T13:34:02.611 INFO:teuthology.orchestra.run.vm00.stderr: -- replaced with /var/log/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/ceph.cephadm.log.gz 2026-03-10T13:34:02.615 INFO:teuthology.orchestra.run.vm00.stderr:gzip -5 --verbose -- /var/log/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/ceph-mon.c.log 2026-03-10T13:34:02.616 INFO:teuthology.orchestra.run.vm08.stderr:gzip -5 --verbose -- /var/log/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/ceph-osd.5.log 2026-03-10T13:34:02.620 INFO:teuthology.orchestra.run.vm08.stderr:/var/log/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/ceph-osd.4.log: gzip -5 --verbose -- /var/log/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/ceph-osd.6.log 2026-03-10T13:34:02.628 
INFO:teuthology.orchestra.run.vm00.stderr:/var/log/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/ceph-volume.log: gzip -5 --verbose -- /var/log/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/ceph-osd.0.log 2026-03-10T13:34:02.628 INFO:teuthology.orchestra.run.vm08.stderr:/var/log/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/ceph-osd.5.log: gzip -5 --verbose -- /var/log/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/ceph-osd.7.log 2026-03-10T13:34:02.633 INFO:teuthology.orchestra.run.vm08.stderr:/var/log/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/ceph-osd.6.log: gzip -5 --verbose -- /var/log/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/ceph-client.rgw.foo.vm08.ljayps.log 2026-03-10T13:34:02.633 INFO:teuthology.orchestra.run.vm00.stderr:/var/log/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/ceph-mon.c.log: gzip -5 --verbose -- /var/log/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/ceph-osd.1.log 2026-03-10T13:34:02.633 INFO:teuthology.orchestra.run.vm00.stderr: 94.3%/var/log/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/ceph-osd.0.log: -- replaced with /var/log/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/ceph-volume.log.gz 2026-03-10T13:34:02.644 INFO:teuthology.orchestra.run.vm00.stderr:gzip -5 --verbose -- /var/log/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/ceph-osd.2.log 2026-03-10T13:34:02.648 INFO:teuthology.orchestra.run.vm08.stderr:/var/log/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/ceph-osd.7.log: /var/log/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/ceph-client.rgw.foo.vm08.ljayps.log: 75.7% -- replaced with /var/log/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/ceph-client.rgw.foo.vm08.ljayps.log.gz 2026-03-10T13:34:02.652 INFO:teuthology.orchestra.run.vm00.stderr:/var/log/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/ceph-osd.1.log: gzip -5 --verbose -- /var/log/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/ceph-osd.3.log 2026-03-10T13:34:02.660 INFO:teuthology.orchestra.run.vm00.stderr:/var/log/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/ceph-osd.2.log: gzip -5 --verbose -- /var/log/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/ceph-client.rgw.foo.vm00.tvlvzo.log 2026-03-10T13:34:02.671 INFO:teuthology.orchestra.run.vm00.stderr:/var/log/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/ceph-osd.3.log: gzip -5 --verbose -- /var/log/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/tcmu-runner.log 2026-03-10T13:34:02.675 INFO:teuthology.orchestra.run.vm00.stderr:/var/log/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/ceph-client.rgw.foo.vm00.tvlvzo.log: 76.3% -- replaced with /var/log/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/ceph-client.rgw.foo.vm00.tvlvzo.log.gz 2026-03-10T13:34:02.676 INFO:teuthology.orchestra.run.vm00.stderr:/var/log/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/tcmu-runner.log: 87.1% -- replaced with /var/log/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/tcmu-runner.log.gz 2026-03-10T13:34:02.799 INFO:teuthology.orchestra.run.vm08.stderr: 90.3% -- replaced with /var/log/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/ceph-mgr.x.log.gz 2026-03-10T13:34:03.207 INFO:teuthology.orchestra.run.vm00.stderr: 90.0% -- replaced with /var/log/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/ceph-mgr.y.log.gz 2026-03-10T13:34:03.529 INFO:teuthology.orchestra.run.vm00.stderr: 92.6% -- replaced with /var/log/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/ceph-mon.c.log.gz 2026-03-10T13:34:03.554 INFO:teuthology.orchestra.run.vm08.stderr: 92.3% -- replaced with /var/log/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/ceph-mon.b.log.gz 2026-03-10T13:34:04.410 INFO:teuthology.orchestra.run.vm00.stderr: 93.7% -- replaced with 
/var/log/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/ceph-osd.2.log.gz
2026-03-10T13:34:04.419 INFO:teuthology.orchestra.run.vm08.stderr: 93.5% -- replaced with /var/log/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/ceph-osd.6.log.gz
2026-03-10T13:34:04.481 INFO:teuthology.orchestra.run.vm00.stderr: 91.2% -- replaced with /var/log/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/ceph-mon.a.log.gz
2026-03-10T13:34:04.661 INFO:teuthology.orchestra.run.vm08.stderr: 93.7% -- replaced with /var/log/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/ceph-osd.5.log.gz
2026-03-10T13:34:04.731 INFO:teuthology.orchestra.run.vm08.stderr: 94.1% -- replaced with /var/log/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/ceph-osd.7.log.gz
2026-03-10T13:34:04.856 INFO:teuthology.orchestra.run.vm08.stderr: 93.8% -- replaced with /var/log/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/ceph-osd.4.log.gz
2026-03-10T13:34:04.858 INFO:teuthology.orchestra.run.vm08.stderr:
2026-03-10T13:34:04.858 INFO:teuthology.orchestra.run.vm08.stderr:real 0m2.285s
2026-03-10T13:34:04.858 INFO:teuthology.orchestra.run.vm08.stderr:user 0m4.203s
2026-03-10T13:34:04.858 INFO:teuthology.orchestra.run.vm08.stderr:sys 0m0.223s
2026-03-10T13:34:04.952 INFO:teuthology.orchestra.run.vm00.stderr: 93.8% -- replaced with /var/log/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/ceph-osd.0.log.gz
2026-03-10T13:34:05.002 INFO:teuthology.orchestra.run.vm00.stderr: 93.8% -- replaced with /var/log/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/ceph-osd.1.log.gz
2026-03-10T13:34:05.120 INFO:teuthology.orchestra.run.vm00.stderr: 93.8% -- replaced with /var/log/ceph/98a3dada-1c81-11f1-89c9-d57c120f78d5/ceph-osd.3.log.gz
2026-03-10T13:34:05.122 INFO:teuthology.orchestra.run.vm00.stderr:
2026-03-10T13:34:05.122 INFO:teuthology.orchestra.run.vm00.stderr:real 0m2.550s
2026-03-10T13:34:05.122 INFO:teuthology.orchestra.run.vm00.stderr:user 0m4.716s
2026-03-10T13:34:05.122 INFO:teuthology.orchestra.run.vm00.stderr:sys 0m0.247s
2026-03-10T13:34:05.122 INFO:tasks.cephadm:Archiving logs...
2026-03-10T13:34:05.123 DEBUG:teuthology.misc:Transferring archived files from vm00:/var/log/ceph to /archive/kyr-2026-03-10_01:00:38-orch-squid-none-default-vps/1040/remote/vm00/log
2026-03-10T13:34:05.123 DEBUG:teuthology.orchestra.run.vm00:> sudo tar c -f - -C /var/log/ceph -- .
2026-03-10T13:34:05.432 DEBUG:teuthology.misc:Transferring archived files from vm08:/var/log/ceph to /archive/kyr-2026-03-10_01:00:38-orch-squid-none-default-vps/1040/remote/vm08/log
2026-03-10T13:34:05.432 DEBUG:teuthology.orchestra.run.vm08:> sudo tar c -f - -C /var/log/ceph -- .
2026-03-10T13:34:05.670 INFO:tasks.cephadm:Removing cluster...
2026-03-10T13:34:05.670 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 --force
2026-03-10T13:34:05.892 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid 98a3dada-1c81-11f1-89c9-d57c120f78d5 --force
2026-03-10T13:34:06.113 INFO:tasks.cephadm:Removing cephadm ...
2026-03-10T13:34:06.113 DEBUG:teuthology.orchestra.run.vm00:> rm -rf /home/ubuntu/cephtest/cephadm
2026-03-10T13:34:06.130 DEBUG:teuthology.orchestra.run.vm08:> rm -rf /home/ubuntu/cephtest/cephadm
2026-03-10T13:34:06.145 INFO:tasks.cephadm:Teardown complete
2026-03-10T13:34:06.145 DEBUG:teuthology.run_tasks:Unwinding manager clock
2026-03-10T13:34:06.147 INFO:teuthology.task.clock:Checking final clock skew...
2026-03-10T13:34:06.147 DEBUG:teuthology.orchestra.run.vm00:> PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true
2026-03-10T13:34:06.171 DEBUG:teuthology.orchestra.run.vm08:> PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true
2026-03-10T13:34:06.186 INFO:teuthology.orchestra.run.vm00.stderr:bash: line 1: ntpq: command not found
2026-03-10T13:34:06.192 INFO:teuthology.orchestra.run.vm00.stdout:MS Name/IP address Stratum Poll Reach LastRx Last sample
2026-03-10T13:34:06.192 INFO:teuthology.orchestra.run.vm00.stdout:===============================================================================
2026-03-10T13:34:06.192 INFO:teuthology.orchestra.run.vm00.stdout:^- ns.gunnarhofmann.de 2 8 377 187 -1061us[-1067us] +/- 37ms
2026-03-10T13:34:06.192 INFO:teuthology.orchestra.run.vm00.stdout:^- www.h4x-gamers.top 2 8 377 59 -993us[-1000us] +/- 47ms
2026-03-10T13:34:06.192 INFO:teuthology.orchestra.run.vm00.stdout:^* vps-fra2.orleans.ddnss.de 4 7 377 58 -1109us[-1116us] +/- 11ms
2026-03-10T13:34:06.192 INFO:teuthology.orchestra.run.vm00.stdout:^+ srv01-nc.securepod.org 2 7 377 122 +1933us[+1926us] +/- 19ms
2026-03-10T13:34:06.200 INFO:teuthology.orchestra.run.vm08.stderr:bash: line 1: ntpq: command not found
2026-03-10T13:34:06.204 INFO:teuthology.orchestra.run.vm08.stdout:MS Name/IP address Stratum Poll Reach LastRx Last sample
2026-03-10T13:34:06.204 INFO:teuthology.orchestra.run.vm08.stdout:===============================================================================
2026-03-10T13:34:06.204 INFO:teuthology.orchestra.run.vm08.stdout:^+ srv01-nc.securepod.org 2 6 377 120 +1969us[+1969us] +/- 19ms
2026-03-10T13:34:06.204 INFO:teuthology.orchestra.run.vm08.stdout:^- ns.gunnarhofmann.de 2 8 377 185 -1086us[-1096us] +/- 37ms
2026-03-10T13:34:06.204 INFO:teuthology.orchestra.run.vm08.stdout:^- www.h4x-gamers.top 2 7 377 60 -1046us[-1046us] +/- 47ms
2026-03-10T13:34:06.204 INFO:teuthology.orchestra.run.vm08.stdout:^* vps-fra2.orleans.ddnss.de 4 8 377 125 -913us[ -924us] +/- 11ms
2026-03-10T13:34:06.204 DEBUG:teuthology.run_tasks:Unwinding manager ansible.cephlab
2026-03-10T13:34:06.207 INFO:teuthology.task.ansible:Skipping ansible cleanup...
2026-03-10T13:34:06.207 DEBUG:teuthology.run_tasks:Unwinding manager selinux
2026-03-10T13:34:06.209 DEBUG:teuthology.run_tasks:Unwinding manager pcp
2026-03-10T13:34:06.211 DEBUG:teuthology.run_tasks:Unwinding manager internal.timer
2026-03-10T13:34:06.213 INFO:teuthology.task.internal:Duration was 1941.667540 seconds
2026-03-10T13:34:06.213 DEBUG:teuthology.run_tasks:Unwinding manager internal.syslog
2026-03-10T13:34:06.215 INFO:teuthology.task.internal.syslog:Shutting down syslog monitoring...
2026-03-10T13:34:06.215 DEBUG:teuthology.orchestra.run.vm00:> sudo rm -f -- /etc/rsyslog.d/80-cephtest.conf && sudo service rsyslog restart
2026-03-10T13:34:06.235 DEBUG:teuthology.orchestra.run.vm08:> sudo rm -f -- /etc/rsyslog.d/80-cephtest.conf && sudo service rsyslog restart
2026-03-10T13:34:06.274 INFO:teuthology.orchestra.run.vm00.stderr:Redirecting to /bin/systemctl restart rsyslog.service
2026-03-10T13:34:06.288 INFO:teuthology.orchestra.run.vm08.stderr:Redirecting to /bin/systemctl restart rsyslog.service
2026-03-10T13:34:06.780 INFO:teuthology.task.internal.syslog:Checking logs for errors...
2026-03-10T13:34:06.780 DEBUG:teuthology.task.internal.syslog:Checking ubuntu@vm00.local
2026-03-10T13:34:06.780 DEBUG:teuthology.orchestra.run.vm00:> grep -E --binary-files=text '\bBUG\b|\bINFO\b|\bDEADLOCK\b' /home/ubuntu/cephtest/archive/syslog/kern.log | grep -v 'task .* blocked for more than .* seconds' | grep -v 'lockdep is turned off' | grep -v 'trying to register non-static key' | grep -v 'DEBUG: fsize' | grep -v CRON | grep -v 'BUG: bad unlock balance detected' | grep -v 'inconsistent lock state' | grep -v '*** DEADLOCK ***' | grep -v 'INFO: possible irq lock inversion dependency detected' | grep -v 'INFO: NMI handler (perf_event_nmi_handler) took too long to run' | grep -v 'INFO: recovery required on readonly' | grep -v 'ceph-create-keys: INFO' | grep -v INFO:ceph-create-keys | grep -v 'Loaded datasource DataSourceOpenStack' | grep -v 'container-storage-setup: INFO: Volume group backing root filesystem could not be determined' | grep -E -v '\bsalt-master\b|\bsalt-minion\b|\bsalt-api\b' | grep -v ceph-crash | grep -E -v '\btcmu-runner\b.*\bINFO\b' | head -n 1
2026-03-10T13:34:06.849 DEBUG:teuthology.task.internal.syslog:Checking ubuntu@vm08.local
2026-03-10T13:34:06.849 DEBUG:teuthology.orchestra.run.vm08:> grep -E --binary-files=text '\bBUG\b|\bINFO\b|\bDEADLOCK\b' /home/ubuntu/cephtest/archive/syslog/kern.log | grep -v 'task .* blocked for more than .* seconds' | grep -v 'lockdep is turned off' | grep -v 'trying to register non-static key' | grep -v 'DEBUG: fsize' | grep -v CRON | grep -v 'BUG: bad unlock balance detected' | grep -v 'inconsistent lock state' | grep -v '*** DEADLOCK ***' | grep -v 'INFO: possible irq lock inversion dependency detected' | grep -v 'INFO: NMI handler (perf_event_nmi_handler) took too long to run' | grep -v 'INFO: recovery required on readonly' | grep -v 'ceph-create-keys: INFO' | grep -v INFO:ceph-create-keys | grep -v 'Loaded datasource DataSourceOpenStack' | grep -v 'container-storage-setup: INFO: Volume group backing root filesystem could not be determined' | grep -E -v '\bsalt-master\b|\bsalt-minion\b|\bsalt-api\b' | grep -v ceph-crash | grep -E -v '\btcmu-runner\b.*\bINFO\b' | head -n 1
2026-03-10T13:34:06.876 INFO:teuthology.task.internal.syslog:Gathering journactl...
2026-03-10T13:34:06.876 DEBUG:teuthology.orchestra.run.vm00:> sudo journalctl > /home/ubuntu/cephtest/archive/syslog/journalctl.log
2026-03-10T13:34:06.890 DEBUG:teuthology.orchestra.run.vm08:> sudo journalctl > /home/ubuntu/cephtest/archive/syslog/journalctl.log
2026-03-10T13:34:07.497 INFO:teuthology.task.internal.syslog:Compressing syslogs...
2026-03-10T13:34:07.497 DEBUG:teuthology.orchestra.run.vm00:> find /home/ubuntu/cephtest/archive/syslog -name '*.log' -print0 | sudo xargs -0 --max-args=1 --max-procs=0 --verbose --no-run-if-empty -- gzip -5 --verbose --
2026-03-10T13:34:07.499 DEBUG:teuthology.orchestra.run.vm08:> find /home/ubuntu/cephtest/archive/syslog -name '*.log' -print0 | sudo xargs -0 --max-args=1 --max-procs=0 --verbose --no-run-if-empty -- gzip -5 --verbose --
2026-03-10T13:34:07.521 INFO:teuthology.orchestra.run.vm08.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/kern.log
2026-03-10T13:34:07.522 INFO:teuthology.orchestra.run.vm08.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/misc.log
2026-03-10T13:34:07.522 INFO:teuthology.orchestra.run.vm08.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/journalctl.log
2026-03-10T13:34:07.522 INFO:teuthology.orchestra.run.vm08.stderr:/home/ubuntu/cephtest/archive/syslog/kern.log: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/kern.log.gz
2026-03-10T13:34:07.522 INFO:teuthology.orchestra.run.vm08.stderr:/home/ubuntu/cephtest/archive/syslog/misc.log: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/misc.log.gz
2026-03-10T13:34:07.523 INFO:teuthology.orchestra.run.vm00.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/kern.log
2026-03-10T13:34:07.523 INFO:teuthology.orchestra.run.vm00.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/misc.log
2026-03-10T13:34:07.523 INFO:teuthology.orchestra.run.vm00.stderr:/home/ubuntu/cephtest/archive/syslog/kern.log: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/kern.log.gz
2026-03-10T13:34:07.523 INFO:teuthology.orchestra.run.vm00.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/journalctl.log
2026-03-10T13:34:07.524 INFO:teuthology.orchestra.run.vm00.stderr:/home/ubuntu/cephtest/archive/syslog/misc.log: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/misc.log.gz
2026-03-10T13:34:07.679 INFO:teuthology.orchestra.run.vm08.stderr:/home/ubuntu/cephtest/archive/syslog/journalctl.log: 97.7% -- replaced with /home/ubuntu/cephtest/archive/syslog/journalctl.log.gz
2026-03-10T13:34:07.699 INFO:teuthology.orchestra.run.vm00.stderr:/home/ubuntu/cephtest/archive/syslog/journalctl.log: 96.8% -- replaced with /home/ubuntu/cephtest/archive/syslog/journalctl.log.gz
2026-03-10T13:34:07.701 DEBUG:teuthology.run_tasks:Unwinding manager internal.sudo
2026-03-10T13:34:07.704 INFO:teuthology.task.internal:Restoring /etc/sudoers...
2026-03-10T13:34:07.704 DEBUG:teuthology.orchestra.run.vm00:> sudo mv -f /etc/sudoers.orig.teuthology /etc/sudoers
2026-03-10T13:34:07.766 DEBUG:teuthology.orchestra.run.vm08:> sudo mv -f /etc/sudoers.orig.teuthology /etc/sudoers
2026-03-10T13:34:07.790 DEBUG:teuthology.run_tasks:Unwinding manager internal.coredump
2026-03-10T13:34:07.793 DEBUG:teuthology.orchestra.run.vm00:> sudo sysctl -w kernel.core_pattern=core && sudo bash -c 'for f in `find /home/ubuntu/cephtest/archive/coredump -type f`; do file $f | grep -q systemd-sysusers && rm $f || true ; done' && rmdir --ignore-fail-on-non-empty -- /home/ubuntu/cephtest/archive/coredump
2026-03-10T13:34:07.808 DEBUG:teuthology.orchestra.run.vm08:> sudo sysctl -w kernel.core_pattern=core && sudo bash -c 'for f in `find /home/ubuntu/cephtest/archive/coredump -type f`; do file $f | grep -q systemd-sysusers && rm $f || true ; done' && rmdir --ignore-fail-on-non-empty -- /home/ubuntu/cephtest/archive/coredump
2026-03-10T13:34:07.832 INFO:teuthology.orchestra.run.vm00.stdout:kernel.core_pattern = core
2026-03-10T13:34:07.859 INFO:teuthology.orchestra.run.vm08.stdout:kernel.core_pattern = core
2026-03-10T13:34:07.909 DEBUG:teuthology.orchestra.run.vm00:> test -e /home/ubuntu/cephtest/archive/coredump
2026-03-10T13:34:07.924 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-10T13:34:07.924 DEBUG:teuthology.orchestra.run.vm08:> test -e /home/ubuntu/cephtest/archive/coredump
2026-03-10T13:34:07.965 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-10T13:34:07.965 DEBUG:teuthology.run_tasks:Unwinding manager internal.archive
2026-03-10T13:34:07.968 INFO:teuthology.task.internal:Transferring archived files...
2026-03-10T13:34:07.968 DEBUG:teuthology.misc:Transferring archived files from vm00:/home/ubuntu/cephtest/archive to /archive/kyr-2026-03-10_01:00:38-orch-squid-none-default-vps/1040/remote/vm00
2026-03-10T13:34:07.968 DEBUG:teuthology.orchestra.run.vm00:> sudo tar c -f - -C /home/ubuntu/cephtest/archive -- .
2026-03-10T13:34:08.000 DEBUG:teuthology.misc:Transferring archived files from vm08:/home/ubuntu/cephtest/archive to /archive/kyr-2026-03-10_01:00:38-orch-squid-none-default-vps/1040/remote/vm08
2026-03-10T13:34:08.000 DEBUG:teuthology.orchestra.run.vm08:> sudo tar c -f - -C /home/ubuntu/cephtest/archive -- .
2026-03-10T13:34:08.034 INFO:teuthology.task.internal:Removing archive directory...
2026-03-10T13:34:08.034 DEBUG:teuthology.orchestra.run.vm00:> rm -rf -- /home/ubuntu/cephtest/archive
2026-03-10T13:34:08.041 DEBUG:teuthology.orchestra.run.vm08:> rm -rf -- /home/ubuntu/cephtest/archive
2026-03-10T13:34:08.089 DEBUG:teuthology.run_tasks:Unwinding manager internal.archive_upload
2026-03-10T13:34:08.092 INFO:teuthology.task.internal:Not uploading archives.
2026-03-10T13:34:08.092 DEBUG:teuthology.run_tasks:Unwinding manager internal.base
2026-03-10T13:34:08.094 INFO:teuthology.task.internal:Tidying up after the test...
2026-03-10T13:34:08.094 DEBUG:teuthology.orchestra.run.vm00:> find /home/ubuntu/cephtest -ls ; rmdir -- /home/ubuntu/cephtest
2026-03-10T13:34:08.096 DEBUG:teuthology.orchestra.run.vm08:> find /home/ubuntu/cephtest -ls ; rmdir -- /home/ubuntu/cephtest
2026-03-10T13:34:08.113 INFO:teuthology.orchestra.run.vm00.stdout: 8532144 0 drwxr-xr-x 2 ubuntu ubuntu 6 Mar 10 13:34 /home/ubuntu/cephtest
2026-03-10T13:34:08.146 INFO:teuthology.orchestra.run.vm08.stdout: 8532145 0 drwxr-xr-x 2 ubuntu ubuntu 6 Mar 10 13:34 /home/ubuntu/cephtest
2026-03-10T13:34:08.147 DEBUG:teuthology.run_tasks:Unwinding manager console_log
2026-03-10T13:34:08.152 INFO:teuthology.run:Summary data: description: orch/cephadm/upgrade/{1-start-distro/1-start-centos_9.stream 2-repo_digest/repo_digest 3-upgrade/staggered 4-wait 5-upgrade-ls agent/off mon_election/classic} duration: 1941.6675398349762 owner: kyr success: true
2026-03-10T13:34:08.153 DEBUG:teuthology.report:Pushing job info to http://localhost:8080
2026-03-10T13:34:08.174 INFO:teuthology.run:pass