2026-03-07T10:06:20.205 INFO:root:teuthology version: 1.2.4.dev6+g1c580df7a 2026-03-07T10:06:20.210 DEBUG:teuthology.report:Pushing job info to http://localhost:8080 2026-03-07T10:06:20.233 INFO:teuthology.run:Config: archive_path: /archive/irq0-2026-03-07_10:02:54-orch:cephadm:workunits-cobaltcore-storage-v19.2.3-fasttrack-5-none-default-vps/10 branch: cobaltcore-storage-v19.2.3-fasttrack-5 description: orch:cephadm:workunits/{0-distro/centos_9.stream agent/on mon_election/connectivity task/test_host_drain} email: null first_in_suite: false flavor: default job_id: '10' last_in_suite: false machine_type: vps name: irq0-2026-03-07_10:02:54-orch:cephadm:workunits-cobaltcore-storage-v19.2.3-fasttrack-5-none-default-vps no_nested_subset: false os_type: centos os_version: 9.stream overrides: admin_socket: branch: cobaltcore-storage-v19.2.3-fasttrack-5 ansible.cephlab: branch: main repo: https://github.com/kshtsk/ceph-cm-ansible.git skip_tags: nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs vars: timezone: UTC ceph: conf: global: mon election default strategy: 3 mgr: debug mgr: 20 debug ms: 1 mgr/cephadm/use_agent: true mon: debug mon: 20 debug ms: 1 debug paxos: 20 osd: debug ms: 1 debug osd: 20 osd mclock iops capacity threshold hdd: 49000 flavor: default log-ignorelist: - \(MDS_ALL_DOWN\) - \(MDS_UP_LESS_THAN_MAX\) - MON_DOWN - mons down - mon down - out of quorum - CEPHADM_STRAY_HOST - CEPHADM_STRAY_DAEMON - CEPHADM_FAILED_DAEMON log-only-match: - CEPHADM_ sha1: 340d3c24fc6ae7529322dc7ccee6c6cb2589da0a ceph-deploy: conf: client: log file: /var/log/ceph/ceph-$name.$pid.log mon: {} cephadm: cephadm_binary_url: https://download.ceph.com/rpm-19.2.3/el9/noarch/cephadm containers: image: harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 install: ceph: flavor: default sha1: 340d3c24fc6ae7529322dc7ccee6c6cb2589da0a extra_system_packages: deb: - python3-xmltodict - s3cmd rpm: - bzip2 - perl-Test-Harness - python3-xmltodict - s3cmd repos: - name: ceph-source priority: 1 url: https://s3.clyso.com/ces-packages/components/ceph/rpm-19.2.3-39-g340d3c24fc6/el9.clyso/SRPMS - name: ceph-noarch priority: 1 url: https://s3.clyso.com/ces-packages/components/ceph/rpm-19.2.3-39-g340d3c24fc6/el9.clyso/noarch - name: ceph priority: 1 url: https://s3.clyso.com/ces-packages/components/ceph/rpm-19.2.3-39-g340d3c24fc6/el9.clyso/x86_64 selinux: allowlist: - scontext=system_u:system_r:logrotate_t:s0 - scontext=system_u:system_r:getty_t:s0 workunit: branch: tt-fasttrack-5-workunits sha1: f96e33505a05da25eb24b46ae34fbbd1718a702b owner: irq0 priority: 1000 repo: https://github.com/ceph/ceph.git roles: - - host.a - mon.a - mgr.a - osd.0 - osd.1 - - host.b - mon.b - mgr.b - osd.2 - osd.3 - - host.c - mon.c - osd.4 - osd.5 seed: 8363 sha1: 340d3c24fc6ae7529322dc7ccee6c6cb2589da0a sleep_before_teardown: 0 subset: 1/64 suite: orch:cephadm:workunits suite_branch: tt-fasttrack-5-workunits suite_path: /home/teuthos/src/github.com_kshtsk_ceph_f96e33505a05da25eb24b46ae34fbbd1718a702b/qa suite_relpath: qa suite_repo: https://github.com/kshtsk/ceph.git suite_sha1: f96e33505a05da25eb24b46ae34fbbd1718a702b targets: vm01.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBBI0gV1mXoWqxwHZ7kVKoQs0nE+2LQ+M8MOa5O4NX/KEQbdbgqr1NqZ0vZQQat+MWMHbMtEjKmfmqyxhiNA6aUQ= vm04.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBArEyZT3Y3HawOhf365kL7KuhAUYmgz9Z0n/9FJrE8zyi+jojfodY+iS9WPIthfkG+NBSHYYskmKU+o95rbqNiI= 
vm07.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBNwUgCSVXpLtdEzkll7Gr/Ag7HDAxMa3Kj5lU6DJVMiHg2sb1aIR7qkosoDQM/13It0RhQ1qte8n+Blvxmoh9ac= tasks: - pexec: all: - sudo dnf remove nvme-cli -y - sudo dnf install nvmetcli nvme-cli -y - cephadm: null - cephadm.shell: host.a: - "set -ex\nHOSTNAMES=$(ceph orch host ls --format json | jq -r '.[] | .hostname')\n\ for host in $HOSTNAMES; do\n # find the hostname for \"host.c\" which will\ \ have no mgr\n HAS_MGRS=$(ceph orch ps --hostname ${host} --format json |\ \ jq 'any(.daemon_type == \"mgr\")')\n if [ \"$HAS_MGRS\" == \"false\" ]; then\n\ \ HOST_C=\"${host}\"\n fi\ndone\n# One last thing to worry about before\ \ draining the host\n# is that the teuthology test tends to put the explicit\n\ # hostnames in the placement for the mon service.\n# We want to make sure we\ \ can drain without providing\n# --force and there is a check for the host being\ \ removed\n# being listed explicitly in the placements. Therefore,\n# we should\ \ remove it from the mon placement.\nceph orch ls mon --export > mon.yaml\n\ sed /\"$HOST_C\"/d mon.yaml > mon_adjusted.yaml\nceph orch apply -i mon_adjusted.yaml\n\ # now drain that host\nceph orch host drain $HOST_C --zap-osd-devices\n# wait\ \ for drain to complete\nHOST_C_DAEMONS=$(ceph orch ps --hostname $HOST_C)\n\ while [ \"$HOST_C_DAEMONS\" != \"No daemons reported\" ]; do\n sleep 15\n \ \ HOST_C_DAEMONS=$(ceph orch ps --hostname $HOST_C)\ndone\n# we want to check\ \ the ability to remove the host from\n# the CRUSH map, so we should first verify\ \ the host is in\n# the CRUSH map.\nceph osd getcrushmap -o compiled-crushmap\n\ crushtool -d compiled-crushmap -o crushmap.txt\nCRUSH_MAP=$(cat crushmap.txt)\n\ if ! grep -q \"$HOST_C\" <<< \"$CRUSH_MAP\"; then\n printf \"Expected to see\ \ $HOST_C in CRUSH map. Saw:\\n\\n$CRUSH_MAP\"\n exit 1\nfi\n# If the drain\ \ was successful, we should be able to remove the\n# host without force with\ \ no issues. If there are still daemons\n# we will get a response telling us\ \ to drain the host and a\n# non-zero return code\nceph orch host rm $HOST_C\ \ --rm-crush-entry\n# verify we've successfully removed the host from the CRUSH\ \ map\nsleep 30\nceph osd getcrushmap -o compiled-crushmap\ncrushtool -d compiled-crushmap\ \ -o crushmap.txt\nCRUSH_MAP=$(cat crushmap.txt)\nif grep -q \"$HOST_C\" <<<\ \ \"$CRUSH_MAP\"; then\n printf \"Saw $HOST_C in CRUSH map after it should\ \ have been removed.\\n\\n$CRUSH_MAP\"\n exit 1\nfi\n" teuthology: fragments_dropped: [] meta: {} postmerge: [] teuthology_branch: clyso-debian-13 teuthology_repo: https://github.com/clyso/teuthology teuthology_sha1: 1c580df7a9c7c2aadc272da296344fd99f27c444 timestamp: 2026-03-07_10:02:54 tube: vps user: irq0 verbose: false worker_log: /home/teuthos/.teuthology/dispatcher/dispatcher.vps.2764 2026-03-07T10:06:20.233 INFO:teuthology.run:suite_path is set to /home/teuthos/src/github.com_kshtsk_ceph_f96e33505a05da25eb24b46ae34fbbd1718a702b/qa; will attempt to use it 2026-03-07T10:06:20.233 INFO:teuthology.run:Found tasks at /home/teuthos/src/github.com_kshtsk_ceph_f96e33505a05da25eb24b46ae34fbbd1718a702b/qa/tasks 2026-03-07T10:06:20.233 INFO:teuthology.run_tasks:Running task internal.save_config... 2026-03-07T10:06:20.233 INFO:teuthology.task.internal:Saving configuration 2026-03-07T10:06:20.239 INFO:teuthology.run_tasks:Running task internal.check_lock... 2026-03-07T10:06:20.240 INFO:teuthology.task.internal.check_lock:Checking locks... 
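For readability, the cephadm.shell payload the job runs on host.a (embedded above as a YAML-escaped string) de-escapes to roughly the following host-drain script; indentation and line wrapping are approximate, but the commands and comments are taken verbatim from the config above:
    set -ex
    HOSTNAMES=$(ceph orch host ls --format json | jq -r '.[] | .hostname')
    for host in $HOSTNAMES; do
      # find the hostname for "host.c" which will have no mgr
      HAS_MGRS=$(ceph orch ps --hostname ${host} --format json | jq 'any(.daemon_type == "mgr")')
      if [ "$HAS_MGRS" == "false" ]; then
        HOST_C="${host}"
      fi
    done
    # One last thing to worry about before draining the host
    # is that the teuthology test tends to put the explicit
    # hostnames in the placement for the mon service.
    # We want to make sure we can drain without providing
    # --force and there is a check for the host being removed
    # being listed explicitly in the placements. Therefore,
    # we should remove it from the mon placement.
    ceph orch ls mon --export > mon.yaml
    sed /"$HOST_C"/d mon.yaml > mon_adjusted.yaml
    ceph orch apply -i mon_adjusted.yaml
    # now drain that host
    ceph orch host drain $HOST_C --zap-osd-devices
    # wait for drain to complete
    HOST_C_DAEMONS=$(ceph orch ps --hostname $HOST_C)
    while [ "$HOST_C_DAEMONS" != "No daemons reported" ]; do
      sleep 15
      HOST_C_DAEMONS=$(ceph orch ps --hostname $HOST_C)
    done
    # we want to check the ability to remove the host from
    # the CRUSH map, so we should first verify the host is in
    # the CRUSH map.
    ceph osd getcrushmap -o compiled-crushmap
    crushtool -d compiled-crushmap -o crushmap.txt
    CRUSH_MAP=$(cat crushmap.txt)
    if ! grep -q "$HOST_C" <<< "$CRUSH_MAP"; then
      printf "Expected to see $HOST_C in CRUSH map. Saw:\n\n$CRUSH_MAP"
      exit 1
    fi
    # If the drain was successful, we should be able to remove the
    # host without force with no issues. If there are still daemons
    # we will get a response telling us to drain the host and a
    # non-zero return code
    ceph orch host rm $HOST_C --rm-crush-entry
    # verify we've successfully removed the host from the CRUSH map
    sleep 30
    ceph osd getcrushmap -o compiled-crushmap
    crushtool -d compiled-crushmap -o crushmap.txt
    CRUSH_MAP=$(cat crushmap.txt)
    if grep -q "$HOST_C" <<< "$CRUSH_MAP"; then
      printf "Saw $HOST_C in CRUSH map after it should have been removed.\n\n$CRUSH_MAP"
      exit 1
    fi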
2026-03-07T10:06:20.245 DEBUG:teuthology.task.internal.check_lock:machine status is {'name': 'vm01.local', 'description': '/archive/irq0-2026-03-07_10:02:54-orch:cephadm:workunits-cobaltcore-storage-v19.2.3-fasttrack-5-none-default-vps/10', 'up': True, 'machine_type': 'vps', 'is_vm': True, 'vm_host': {'name': 'localhost', 'description': None, 'up': True, 'machine_type': 'libvirt', 'is_vm': False, 'vm_host': None, 'os_type': None, 'os_version': None, 'arch': None, 'locked': True, 'locked_since': None, 'locked_by': None, 'mac_address': None, 'ssh_pub_key': None}, 'os_type': 'centos', 'os_version': '9.stream', 'arch': 'x86_64', 'locked': True, 'locked_since': '2026-03-07 10:04:41.216261', 'locked_by': 'irq0', 'mac_address': '52:55:00:00:00:01', 'ssh_pub_key': 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBBI0gV1mXoWqxwHZ7kVKoQs0nE+2LQ+M8MOa5O4NX/KEQbdbgqr1NqZ0vZQQat+MWMHbMtEjKmfmqyxhiNA6aUQ='} 2026-03-07T10:06:20.252 DEBUG:teuthology.task.internal.check_lock:machine status is {'name': 'vm04.local', 'description': '/archive/irq0-2026-03-07_10:02:54-orch:cephadm:workunits-cobaltcore-storage-v19.2.3-fasttrack-5-none-default-vps/10', 'up': True, 'machine_type': 'vps', 'is_vm': True, 'vm_host': {'name': 'localhost', 'description': None, 'up': True, 'machine_type': 'libvirt', 'is_vm': False, 'vm_host': None, 'os_type': None, 'os_version': None, 'arch': None, 'locked': True, 'locked_since': None, 'locked_by': None, 'mac_address': None, 'ssh_pub_key': None}, 'os_type': 'centos', 'os_version': '9.stream', 'arch': 'x86_64', 'locked': True, 'locked_since': '2026-03-07 10:04:41.216983', 'locked_by': 'irq0', 'mac_address': '52:55:00:00:00:04', 'ssh_pub_key': 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBArEyZT3Y3HawOhf365kL7KuhAUYmgz9Z0n/9FJrE8zyi+jojfodY+iS9WPIthfkG+NBSHYYskmKU+o95rbqNiI='} 2026-03-07T10:06:20.257 DEBUG:teuthology.task.internal.check_lock:machine status is {'name': 'vm07.local', 'description': '/archive/irq0-2026-03-07_10:02:54-orch:cephadm:workunits-cobaltcore-storage-v19.2.3-fasttrack-5-none-default-vps/10', 'up': True, 'machine_type': 'vps', 'is_vm': True, 'vm_host': {'name': 'localhost', 'description': None, 'up': True, 'machine_type': 'libvirt', 'is_vm': False, 'vm_host': None, 'os_type': None, 'os_version': None, 'arch': None, 'locked': True, 'locked_since': None, 'locked_by': None, 'mac_address': None, 'ssh_pub_key': None}, 'os_type': 'centos', 'os_version': '9.stream', 'arch': 'x86_64', 'locked': True, 'locked_since': '2026-03-07 10:04:41.216755', 'locked_by': 'irq0', 'mac_address': '52:55:00:00:00:07', 'ssh_pub_key': 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBNwUgCSVXpLtdEzkll7Gr/Ag7HDAxMa3Kj5lU6DJVMiHg2sb1aIR7qkosoDQM/13It0RhQ1qte8n+Blvxmoh9ac='} 2026-03-07T10:06:20.257 INFO:teuthology.run_tasks:Running task internal.add_remotes... 2026-03-07T10:06:20.257 INFO:teuthology.task.internal:roles: ubuntu@vm01.local - ['host.a', 'mon.a', 'mgr.a', 'osd.0', 'osd.1'] 2026-03-07T10:06:20.257 INFO:teuthology.task.internal:roles: ubuntu@vm04.local - ['host.b', 'mon.b', 'mgr.b', 'osd.2', 'osd.3'] 2026-03-07T10:06:20.257 INFO:teuthology.task.internal:roles: ubuntu@vm07.local - ['host.c', 'mon.c', 'osd.4', 'osd.5'] 2026-03-07T10:06:20.257 INFO:teuthology.run_tasks:Running task console_log... 
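The machine status records above are Python dict reprs rather than JSON, so they cannot be fed to jq directly; a quick illustrative conversion (assuming one record has been copied into a hypothetical status.txt) would be:
    # convert the single-quoted dict repr to JSON, then pick out the fields of interest
    python3 -c 'import ast, json, sys; print(json.dumps(ast.literal_eval(sys.stdin.read())))' < status.txt \
      | jq '{name, up, locked, locked_by, os_type, os_version}'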
2026-03-07T10:06:20.262 DEBUG:teuthology.task.console_log:vm01 does not support IPMI; excluding 2026-03-07T10:06:20.267 DEBUG:teuthology.task.console_log:vm04 does not support IPMI; excluding 2026-03-07T10:06:20.271 DEBUG:teuthology.task.console_log:vm07 does not support IPMI; excluding 2026-03-07T10:06:20.271 DEBUG:teuthology.exit:Installing handler: Handler(exiter=, func=.kill_console_loggers at 0x7fa2f6b8bf40>, signals=[15]) 2026-03-07T10:06:20.271 INFO:teuthology.run_tasks:Running task internal.connect... 2026-03-07T10:06:20.272 INFO:teuthology.task.internal:Opening connections... 2026-03-07T10:06:20.272 DEBUG:teuthology.task.internal:connecting to ubuntu@vm01.local 2026-03-07T10:06:20.272 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm01.local', 'username': 'ubuntu', 'timeout': 60} 2026-03-07T10:06:20.331 DEBUG:teuthology.task.internal:connecting to ubuntu@vm04.local 2026-03-07T10:06:20.332 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm04.local', 'username': 'ubuntu', 'timeout': 60} 2026-03-07T10:06:20.392 DEBUG:teuthology.task.internal:connecting to ubuntu@vm07.local 2026-03-07T10:06:20.393 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm07.local', 'username': 'ubuntu', 'timeout': 60} 2026-03-07T10:06:20.452 INFO:teuthology.run_tasks:Running task internal.push_inventory... 2026-03-07T10:06:20.453 DEBUG:teuthology.orchestra.run.vm01:> uname -m 2026-03-07T10:06:20.468 INFO:teuthology.orchestra.run.vm01.stdout:x86_64 2026-03-07T10:06:20.468 DEBUG:teuthology.orchestra.run.vm01:> cat /etc/os-release 2026-03-07T10:06:20.522 INFO:teuthology.orchestra.run.vm01.stdout:NAME="CentOS Stream" 2026-03-07T10:06:20.522 INFO:teuthology.orchestra.run.vm01.stdout:VERSION="9" 2026-03-07T10:06:20.522 INFO:teuthology.orchestra.run.vm01.stdout:ID="centos" 2026-03-07T10:06:20.522 INFO:teuthology.orchestra.run.vm01.stdout:ID_LIKE="rhel fedora" 2026-03-07T10:06:20.522 INFO:teuthology.orchestra.run.vm01.stdout:VERSION_ID="9" 2026-03-07T10:06:20.522 INFO:teuthology.orchestra.run.vm01.stdout:PLATFORM_ID="platform:el9" 2026-03-07T10:06:20.523 INFO:teuthology.orchestra.run.vm01.stdout:PRETTY_NAME="CentOS Stream 9" 2026-03-07T10:06:20.523 INFO:teuthology.orchestra.run.vm01.stdout:ANSI_COLOR="0;31" 2026-03-07T10:06:20.523 INFO:teuthology.orchestra.run.vm01.stdout:LOGO="fedora-logo-icon" 2026-03-07T10:06:20.523 INFO:teuthology.orchestra.run.vm01.stdout:CPE_NAME="cpe:/o:centos:centos:9" 2026-03-07T10:06:20.523 INFO:teuthology.orchestra.run.vm01.stdout:HOME_URL="https://centos.org/" 2026-03-07T10:06:20.523 INFO:teuthology.orchestra.run.vm01.stdout:BUG_REPORT_URL="https://issues.redhat.com/" 2026-03-07T10:06:20.523 INFO:teuthology.orchestra.run.vm01.stdout:REDHAT_SUPPORT_PRODUCT="Red Hat Enterprise Linux 9" 2026-03-07T10:06:20.523 INFO:teuthology.orchestra.run.vm01.stdout:REDHAT_SUPPORT_PRODUCT_VERSION="CentOS Stream" 2026-03-07T10:06:20.523 INFO:teuthology.lock.ops:Updating vm01.local on lock server 2026-03-07T10:06:20.527 DEBUG:teuthology.orchestra.run.vm04:> uname -m 2026-03-07T10:06:20.541 INFO:teuthology.orchestra.run.vm04.stdout:x86_64 2026-03-07T10:06:20.541 DEBUG:teuthology.orchestra.run.vm04:> cat /etc/os-release 2026-03-07T10:06:20.595 INFO:teuthology.orchestra.run.vm04.stdout:NAME="CentOS Stream" 2026-03-07T10:06:20.595 INFO:teuthology.orchestra.run.vm04.stdout:VERSION="9" 2026-03-07T10:06:20.595 INFO:teuthology.orchestra.run.vm04.stdout:ID="centos" 2026-03-07T10:06:20.596 INFO:teuthology.orchestra.run.vm04.stdout:ID_LIKE="rhel fedora" 2026-03-07T10:06:20.596 
INFO:teuthology.orchestra.run.vm04.stdout:VERSION_ID="9" 2026-03-07T10:06:20.596 INFO:teuthology.orchestra.run.vm04.stdout:PLATFORM_ID="platform:el9" 2026-03-07T10:06:20.596 INFO:teuthology.orchestra.run.vm04.stdout:PRETTY_NAME="CentOS Stream 9" 2026-03-07T10:06:20.596 INFO:teuthology.orchestra.run.vm04.stdout:ANSI_COLOR="0;31" 2026-03-07T10:06:20.596 INFO:teuthology.orchestra.run.vm04.stdout:LOGO="fedora-logo-icon" 2026-03-07T10:06:20.596 INFO:teuthology.orchestra.run.vm04.stdout:CPE_NAME="cpe:/o:centos:centos:9" 2026-03-07T10:06:20.596 INFO:teuthology.orchestra.run.vm04.stdout:HOME_URL="https://centos.org/" 2026-03-07T10:06:20.596 INFO:teuthology.orchestra.run.vm04.stdout:BUG_REPORT_URL="https://issues.redhat.com/" 2026-03-07T10:06:20.596 INFO:teuthology.orchestra.run.vm04.stdout:REDHAT_SUPPORT_PRODUCT="Red Hat Enterprise Linux 9" 2026-03-07T10:06:20.596 INFO:teuthology.orchestra.run.vm04.stdout:REDHAT_SUPPORT_PRODUCT_VERSION="CentOS Stream" 2026-03-07T10:06:20.596 INFO:teuthology.lock.ops:Updating vm04.local on lock server 2026-03-07T10:06:20.599 DEBUG:teuthology.orchestra.run.vm07:> uname -m 2026-03-07T10:06:20.614 INFO:teuthology.orchestra.run.vm07.stdout:x86_64 2026-03-07T10:06:20.614 DEBUG:teuthology.orchestra.run.vm07:> cat /etc/os-release 2026-03-07T10:06:20.668 INFO:teuthology.orchestra.run.vm07.stdout:NAME="CentOS Stream" 2026-03-07T10:06:20.668 INFO:teuthology.orchestra.run.vm07.stdout:VERSION="9" 2026-03-07T10:06:20.668 INFO:teuthology.orchestra.run.vm07.stdout:ID="centos" 2026-03-07T10:06:20.668 INFO:teuthology.orchestra.run.vm07.stdout:ID_LIKE="rhel fedora" 2026-03-07T10:06:20.668 INFO:teuthology.orchestra.run.vm07.stdout:VERSION_ID="9" 2026-03-07T10:06:20.668 INFO:teuthology.orchestra.run.vm07.stdout:PLATFORM_ID="platform:el9" 2026-03-07T10:06:20.668 INFO:teuthology.orchestra.run.vm07.stdout:PRETTY_NAME="CentOS Stream 9" 2026-03-07T10:06:20.668 INFO:teuthology.orchestra.run.vm07.stdout:ANSI_COLOR="0;31" 2026-03-07T10:06:20.668 INFO:teuthology.orchestra.run.vm07.stdout:LOGO="fedora-logo-icon" 2026-03-07T10:06:20.668 INFO:teuthology.orchestra.run.vm07.stdout:CPE_NAME="cpe:/o:centos:centos:9" 2026-03-07T10:06:20.668 INFO:teuthology.orchestra.run.vm07.stdout:HOME_URL="https://centos.org/" 2026-03-07T10:06:20.668 INFO:teuthology.orchestra.run.vm07.stdout:BUG_REPORT_URL="https://issues.redhat.com/" 2026-03-07T10:06:20.668 INFO:teuthology.orchestra.run.vm07.stdout:REDHAT_SUPPORT_PRODUCT="Red Hat Enterprise Linux 9" 2026-03-07T10:06:20.668 INFO:teuthology.orchestra.run.vm07.stdout:REDHAT_SUPPORT_PRODUCT_VERSION="CentOS Stream" 2026-03-07T10:06:20.668 INFO:teuthology.lock.ops:Updating vm07.local on lock server 2026-03-07T10:06:20.672 INFO:teuthology.run_tasks:Running task internal.serialize_remote_roles... 2026-03-07T10:06:20.674 INFO:teuthology.run_tasks:Running task internal.check_conflict... 2026-03-07T10:06:20.675 INFO:teuthology.task.internal:Checking for old test directory... 2026-03-07T10:06:20.675 DEBUG:teuthology.orchestra.run.vm01:> test '!' -e /home/ubuntu/cephtest 2026-03-07T10:06:20.676 DEBUG:teuthology.orchestra.run.vm04:> test '!' -e /home/ubuntu/cephtest 2026-03-07T10:06:20.678 DEBUG:teuthology.orchestra.run.vm07:> test '!' -e /home/ubuntu/cephtest 2026-03-07T10:06:20.722 INFO:teuthology.run_tasks:Running task internal.check_ceph_data... 2026-03-07T10:06:20.723 INFO:teuthology.task.internal:Checking for non-empty /var/lib/ceph... 
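internal.push_inventory derives each node's inventory from the uname -m and /etc/os-release probes shown above before updating the lock server; the manual equivalent (illustrative, run from the teuthology host against any of the nodes) is simply:
    # architecture and OS facts, as collected per node
    ssh ubuntu@vm01.local uname -m                                        # -> x86_64
    ssh ubuntu@vm01.local 'grep -E "^(ID|VERSION_ID)=" /etc/os-release'   # -> centos / 9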
2026-03-07T10:06:20.723 DEBUG:teuthology.orchestra.run.vm01:> test -z $(ls -A /var/lib/ceph) 2026-03-07T10:06:20.731 DEBUG:teuthology.orchestra.run.vm04:> test -z $(ls -A /var/lib/ceph) 2026-03-07T10:06:20.735 DEBUG:teuthology.orchestra.run.vm07:> test -z $(ls -A /var/lib/ceph) 2026-03-07T10:06:20.744 INFO:teuthology.orchestra.run.vm01.stderr:ls: cannot access '/var/lib/ceph': No such file or directory 2026-03-07T10:06:20.747 INFO:teuthology.orchestra.run.vm04.stderr:ls: cannot access '/var/lib/ceph': No such file or directory 2026-03-07T10:06:20.777 INFO:teuthology.orchestra.run.vm07.stderr:ls: cannot access '/var/lib/ceph': No such file or directory 2026-03-07T10:06:20.777 INFO:teuthology.run_tasks:Running task internal.vm_setup... 2026-03-07T10:06:20.785 DEBUG:teuthology.orchestra.run.vm01:> test -e /ceph-qa-ready 2026-03-07T10:06:20.799 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-07T10:06:20.993 DEBUG:teuthology.orchestra.run.vm04:> test -e /ceph-qa-ready 2026-03-07T10:06:21.007 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-07T10:06:21.192 DEBUG:teuthology.orchestra.run.vm07:> test -e /ceph-qa-ready 2026-03-07T10:06:21.206 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-07T10:06:21.388 INFO:teuthology.run_tasks:Running task internal.base... 2026-03-07T10:06:21.389 INFO:teuthology.task.internal:Creating test directory... 2026-03-07T10:06:21.389 DEBUG:teuthology.orchestra.run.vm01:> mkdir -p -m0755 -- /home/ubuntu/cephtest 2026-03-07T10:06:21.391 DEBUG:teuthology.orchestra.run.vm04:> mkdir -p -m0755 -- /home/ubuntu/cephtest 2026-03-07T10:06:21.393 DEBUG:teuthology.orchestra.run.vm07:> mkdir -p -m0755 -- /home/ubuntu/cephtest 2026-03-07T10:06:21.407 INFO:teuthology.run_tasks:Running task internal.archive_upload... 2026-03-07T10:06:21.409 INFO:teuthology.run_tasks:Running task internal.archive... 2026-03-07T10:06:21.410 INFO:teuthology.task.internal:Creating archive directory... 2026-03-07T10:06:21.410 DEBUG:teuthology.orchestra.run.vm01:> install -d -m0755 -- /home/ubuntu/cephtest/archive 2026-03-07T10:06:21.448 DEBUG:teuthology.orchestra.run.vm04:> install -d -m0755 -- /home/ubuntu/cephtest/archive 2026-03-07T10:06:21.450 DEBUG:teuthology.orchestra.run.vm07:> install -d -m0755 -- /home/ubuntu/cephtest/archive 2026-03-07T10:06:21.471 INFO:teuthology.run_tasks:Running task internal.coredump... 2026-03-07T10:06:21.472 INFO:teuthology.task.internal:Enabling coredump saving... 
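Note that the /var/lib/ceph check above passes here even though ls prints "No such file or directory": with the command substitution unquoted, a missing directory yields an empty string and test -z succeeds. A stricter manual check (a sketch, not the task's own code) would be:
    # fail only if the directory exists and actually contains anything
    if [ -d /var/lib/ceph ] && [ -n "$(sudo ls -A /var/lib/ceph)" ]; then
        echo "/var/lib/ceph is not empty" >&2
        exit 1
    fi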
2026-03-07T10:06:21.472 DEBUG:teuthology.orchestra.run.vm01:> test -f /run/.containerenv -o -f /.dockerenv 2026-03-07T10:06:21.516 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-07T10:06:21.517 DEBUG:teuthology.orchestra.run.vm04:> test -f /run/.containerenv -o -f /.dockerenv 2026-03-07T10:06:21.533 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-07T10:06:21.534 DEBUG:teuthology.orchestra.run.vm07:> test -f /run/.containerenv -o -f /.dockerenv 2026-03-07T10:06:21.551 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-07T10:06:21.551 DEBUG:teuthology.orchestra.run.vm01:> install -d -m0755 -- /home/ubuntu/cephtest/archive/coredump && sudo sysctl -w kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core && echo kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core | sudo tee -a /etc/sysctl.conf 2026-03-07T10:06:21.559 DEBUG:teuthology.orchestra.run.vm04:> install -d -m0755 -- /home/ubuntu/cephtest/archive/coredump && sudo sysctl -w kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core && echo kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core | sudo tee -a /etc/sysctl.conf 2026-03-07T10:06:21.575 DEBUG:teuthology.orchestra.run.vm07:> install -d -m0755 -- /home/ubuntu/cephtest/archive/coredump && sudo sysctl -w kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core && echo kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core | sudo tee -a /etc/sysctl.conf 2026-03-07T10:06:21.581 INFO:teuthology.orchestra.run.vm01.stdout:kernel.core_pattern = /home/ubuntu/cephtest/archive/coredump/%t.%p.core 2026-03-07T10:06:21.591 INFO:teuthology.orchestra.run.vm01.stdout:kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core 2026-03-07T10:06:21.599 INFO:teuthology.orchestra.run.vm04.stdout:kernel.core_pattern = /home/ubuntu/cephtest/archive/coredump/%t.%p.core 2026-03-07T10:06:21.610 INFO:teuthology.orchestra.run.vm04.stdout:kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core 2026-03-07T10:06:21.620 INFO:teuthology.orchestra.run.vm07.stdout:kernel.core_pattern = /home/ubuntu/cephtest/archive/coredump/%t.%p.core 2026-03-07T10:06:21.631 INFO:teuthology.orchestra.run.vm07.stdout:kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core 2026-03-07T10:06:21.632 INFO:teuthology.run_tasks:Running task internal.sudo... 2026-03-07T10:06:21.633 INFO:teuthology.task.internal:Configuring sudo... 2026-03-07T10:06:21.633 DEBUG:teuthology.orchestra.run.vm01:> sudo sed -i.orig.teuthology -e 's/^\([^#]*\) \(requiretty\)/\1 !\2/g' -e 's/^\([^#]*\) !\(visiblepw\)/\1 \2/g' /etc/sudoers 2026-03-07T10:06:21.635 DEBUG:teuthology.orchestra.run.vm04:> sudo sed -i.orig.teuthology -e 's/^\([^#]*\) \(requiretty\)/\1 !\2/g' -e 's/^\([^#]*\) !\(visiblepw\)/\1 \2/g' /etc/sudoers 2026-03-07T10:06:21.654 DEBUG:teuthology.orchestra.run.vm07:> sudo sed -i.orig.teuthology -e 's/^\([^#]*\) \(requiretty\)/\1 !\2/g' -e 's/^\([^#]*\) !\(visiblepw\)/\1 \2/g' /etc/sudoers 2026-03-07T10:06:21.695 INFO:teuthology.run_tasks:Running task internal.syslog... 2026-03-07T10:06:21.697 INFO:teuthology.task.internal.syslog:Starting syslog monitoring... 
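The coredump task above sets kernel.core_pattern on each node and persists it in /etc/sysctl.conf; to double-check that the pattern actually took effect one could run, for example:
    # both should show the archive path configured above
    sysctl kernel.core_pattern
    cat /proc/sys/kernel/core_pattern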
2026-03-07T10:06:21.697 DEBUG:teuthology.orchestra.run.vm01:> mkdir -p -m0755 -- /home/ubuntu/cephtest/archive/syslog 2026-03-07T10:06:21.702 DEBUG:teuthology.orchestra.run.vm04:> mkdir -p -m0755 -- /home/ubuntu/cephtest/archive/syslog 2026-03-07T10:06:21.720 DEBUG:teuthology.orchestra.run.vm07:> mkdir -p -m0755 -- /home/ubuntu/cephtest/archive/syslog 2026-03-07T10:06:21.751 DEBUG:teuthology.orchestra.run.vm01:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/kern.log 2026-03-07T10:06:21.782 DEBUG:teuthology.orchestra.run.vm01:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/misc.log 2026-03-07T10:06:21.839 DEBUG:teuthology.orchestra.run.vm01:> set -ex 2026-03-07T10:06:21.839 DEBUG:teuthology.orchestra.run.vm01:> sudo dd of=/etc/rsyslog.d/80-cephtest.conf 2026-03-07T10:06:21.895 DEBUG:teuthology.orchestra.run.vm04:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/kern.log 2026-03-07T10:06:21.917 DEBUG:teuthology.orchestra.run.vm04:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/misc.log 2026-03-07T10:06:21.974 DEBUG:teuthology.orchestra.run.vm04:> set -ex 2026-03-07T10:06:21.974 DEBUG:teuthology.orchestra.run.vm04:> sudo dd of=/etc/rsyslog.d/80-cephtest.conf 2026-03-07T10:06:22.032 DEBUG:teuthology.orchestra.run.vm07:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/kern.log 2026-03-07T10:06:22.056 DEBUG:teuthology.orchestra.run.vm07:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/misc.log 2026-03-07T10:06:22.113 DEBUG:teuthology.orchestra.run.vm07:> set -ex 2026-03-07T10:06:22.113 DEBUG:teuthology.orchestra.run.vm07:> sudo dd of=/etc/rsyslog.d/80-cephtest.conf 2026-03-07T10:06:22.171 DEBUG:teuthology.orchestra.run.vm01:> sudo service rsyslog restart 2026-03-07T10:06:22.173 DEBUG:teuthology.orchestra.run.vm04:> sudo service rsyslog restart 2026-03-07T10:06:22.175 DEBUG:teuthology.orchestra.run.vm07:> sudo service rsyslog restart 2026-03-07T10:06:22.203 INFO:teuthology.orchestra.run.vm01.stderr:Redirecting to /bin/systemctl restart rsyslog.service 2026-03-07T10:06:22.205 INFO:teuthology.orchestra.run.vm04.stderr:Redirecting to /bin/systemctl restart rsyslog.service 2026-03-07T10:06:22.243 INFO:teuthology.orchestra.run.vm07.stderr:Redirecting to /bin/systemctl restart rsyslog.service 2026-03-07T10:06:22.560 INFO:teuthology.run_tasks:Running task internal.timer... 2026-03-07T10:06:22.562 INFO:teuthology.task.internal:Starting timer... 2026-03-07T10:06:22.562 INFO:teuthology.run_tasks:Running task pcp... 2026-03-07T10:06:22.564 INFO:teuthology.run_tasks:Running task selinux... 2026-03-07T10:06:22.566 DEBUG:teuthology.task:Applying overrides for task selinux: {'allowlist': ['scontext=system_u:system_r:logrotate_t:s0', 'scontext=system_u:system_r:getty_t:s0']} 2026-03-07T10:06:22.566 INFO:teuthology.task.selinux:Excluding vm01: VMs are not yet supported 2026-03-07T10:06:22.566 INFO:teuthology.task.selinux:Excluding vm04: VMs are not yet supported 2026-03-07T10:06:22.566 INFO:teuthology.task.selinux:Excluding vm07: VMs are not yet supported 2026-03-07T10:06:22.566 DEBUG:teuthology.task.selinux:Getting current SELinux state 2026-03-07T10:06:22.566 DEBUG:teuthology.task.selinux:Existing SELinux modes: {} 2026-03-07T10:06:22.566 INFO:teuthology.task.selinux:Putting SELinux into permissive mode 2026-03-07T10:06:22.566 INFO:teuthology.run_tasks:Running task ansible.cephlab... 
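The contents written to /etc/rsyslog.d/80-cephtest.conf are piped over stdin to dd and therefore do not appear in the log; a drop-in of roughly this shape (hypothetical, shown only to illustrate what the syslog task is arranging) would route kernel and other messages into the archive files created above:
    # hypothetical drop-in contents; the real file is written via 'sudo dd' above
    printf '%s\n' \
      'kern.* -/home/ubuntu/cephtest/archive/syslog/kern.log' \
      '*.*;kern.none -/home/ubuntu/cephtest/archive/syslog/misc.log' \
      | sudo tee /etc/rsyslog.d/80-cephtest.conf >/dev/null
    sudo systemctl restart rsyslog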
2026-03-07T10:06:22.568 DEBUG:teuthology.task:Applying overrides for task ansible.cephlab: {'branch': 'main', 'repo': 'https://github.com/kshtsk/ceph-cm-ansible.git', 'skip_tags': 'nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs', 'vars': {'timezone': 'UTC'}} 2026-03-07T10:06:22.568 DEBUG:teuthology.repo_utils:Setting repo remote to https://github.com/kshtsk/ceph-cm-ansible.git 2026-03-07T10:06:22.569 INFO:teuthology.repo_utils:Fetching github.com_kshtsk_ceph-cm-ansible_main from origin 2026-03-07T10:06:23.134 DEBUG:teuthology.repo_utils:Resetting repo at /home/teuthos/src/github.com_kshtsk_ceph-cm-ansible_main to origin/main 2026-03-07T10:06:23.139 INFO:teuthology.task.ansible:Playbook: [{'import_playbook': 'ansible_managed.yml'}, {'import_playbook': 'teuthology.yml'}, {'hosts': 'testnodes', 'tasks': [{'set_fact': {'ran_from_cephlab_playbook': True}}]}, {'import_playbook': 'testnodes.yml'}, {'import_playbook': 'container-host.yml'}, {'import_playbook': 'cobbler.yml'}, {'import_playbook': 'paddles.yml'}, {'import_playbook': 'pulpito.yml'}, {'hosts': 'testnodes', 'become': True, 'tasks': [{'name': 'Touch /ceph-qa-ready', 'file': {'path': '/ceph-qa-ready', 'state': 'touch'}, 'when': 'ran_from_cephlab_playbook|bool'}]}] 2026-03-07T10:06:23.139 DEBUG:teuthology.task.ansible:Running ansible-playbook -v --extra-vars '{"ansible_ssh_user": "ubuntu", "timezone": "UTC"}' -i /tmp/teuth_ansible_inventoryuh1tnq3h --limit vm01.local,vm04.local,vm07.local /home/teuthos/src/github.com_kshtsk_ceph-cm-ansible_main/cephlab.yml --skip-tags nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs 2026-03-07T10:13:08.799 DEBUG:teuthology.task.ansible:Reconnecting to [Remote(name='ubuntu@vm01.local'), Remote(name='ubuntu@vm04.local'), Remote(name='ubuntu@vm07.local')] 2026-03-07T10:13:08.799 INFO:teuthology.orchestra.remote:Trying to reconnect to host 'ubuntu@vm01.local' 2026-03-07T10:13:08.800 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm01.local', 'username': 'ubuntu', 'timeout': 60} 2026-03-07T10:13:08.857 DEBUG:teuthology.orchestra.run.vm01:> true 2026-03-07T10:13:08.931 INFO:teuthology.orchestra.remote:Successfully reconnected to host 'ubuntu@vm01.local' 2026-03-07T10:13:08.931 INFO:teuthology.orchestra.remote:Trying to reconnect to host 'ubuntu@vm04.local' 2026-03-07T10:13:08.932 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm04.local', 'username': 'ubuntu', 'timeout': 60} 2026-03-07T10:13:08.990 DEBUG:teuthology.orchestra.run.vm04:> true 2026-03-07T10:13:09.064 INFO:teuthology.orchestra.remote:Successfully reconnected to host 'ubuntu@vm04.local' 2026-03-07T10:13:09.064 INFO:teuthology.orchestra.remote:Trying to reconnect to host 'ubuntu@vm07.local' 2026-03-07T10:13:09.064 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm07.local', 'username': 'ubuntu', 'timeout': 60} 2026-03-07T10:13:09.127 DEBUG:teuthology.orchestra.run.vm07:> true 2026-03-07T10:13:09.209 INFO:teuthology.orchestra.remote:Successfully reconnected to host 'ubuntu@vm07.local' 2026-03-07T10:13:09.209 INFO:teuthology.run_tasks:Running task clock... 2026-03-07T10:13:09.211 INFO:teuthology.task.clock:Syncing clocks and checking initial clock skew... 
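After the roughly seven-minute ansible-playbook run above, teuthology verifies each node is still reachable by running a trivial true over SSH; the manual equivalent of that reconnect check is:
    # confirm all three test nodes still accept SSH after the playbook
    for h in vm01 vm04 vm07; do
        ssh ubuntu@"$h".local true && echo "$h ok"
    done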
2026-03-07T10:13:09.212 INFO:teuthology.orchestra.run:Running command with timeout 360 2026-03-07T10:13:09.212 DEBUG:teuthology.orchestra.run.vm01:> sudo systemctl stop ntp.service || sudo systemctl stop ntpd.service || sudo systemctl stop chronyd.service ; sudo ntpd -gq || sudo chronyc makestep ; sudo systemctl start ntp.service || sudo systemctl start ntpd.service || sudo systemctl start chronyd.service ; PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true 2026-03-07T10:13:09.213 INFO:teuthology.orchestra.run:Running command with timeout 360 2026-03-07T10:13:09.213 DEBUG:teuthology.orchestra.run.vm04:> sudo systemctl stop ntp.service || sudo systemctl stop ntpd.service || sudo systemctl stop chronyd.service ; sudo ntpd -gq || sudo chronyc makestep ; sudo systemctl start ntp.service || sudo systemctl start ntpd.service || sudo systemctl start chronyd.service ; PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true 2026-03-07T10:13:09.215 INFO:teuthology.orchestra.run:Running command with timeout 360 2026-03-07T10:13:09.216 DEBUG:teuthology.orchestra.run.vm07:> sudo systemctl stop ntp.service || sudo systemctl stop ntpd.service || sudo systemctl stop chronyd.service ; sudo ntpd -gq || sudo chronyc makestep ; sudo systemctl start ntp.service || sudo systemctl start ntpd.service || sudo systemctl start chronyd.service ; PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true 2026-03-07T10:13:09.239 INFO:teuthology.orchestra.run.vm01.stderr:Failed to stop ntp.service: Unit ntp.service not loaded. 2026-03-07T10:13:09.241 INFO:teuthology.orchestra.run.vm04.stderr:Failed to stop ntp.service: Unit ntp.service not loaded. 2026-03-07T10:13:09.251 INFO:teuthology.orchestra.run.vm01.stderr:Failed to stop ntpd.service: Unit ntpd.service not loaded. 2026-03-07T10:13:09.254 INFO:teuthology.orchestra.run.vm04.stderr:Failed to stop ntpd.service: Unit ntpd.service not loaded. 2026-03-07T10:13:09.275 INFO:teuthology.orchestra.run.vm01.stderr:sudo: ntpd: command not found 2026-03-07T10:13:09.279 INFO:teuthology.orchestra.run.vm04.stderr:sudo: ntpd: command not found 2026-03-07T10:13:09.286 INFO:teuthology.orchestra.run.vm01.stdout:506 Cannot talk to daemon 2026-03-07T10:13:09.287 INFO:teuthology.orchestra.run.vm07.stderr:Failed to stop ntp.service: Unit ntp.service not loaded. 2026-03-07T10:13:09.290 INFO:teuthology.orchestra.run.vm04.stdout:506 Cannot talk to daemon 2026-03-07T10:13:09.298 INFO:teuthology.orchestra.run.vm01.stderr:Failed to start ntp.service: Unit ntp.service not found. 2026-03-07T10:13:09.302 INFO:teuthology.orchestra.run.vm04.stderr:Failed to start ntp.service: Unit ntp.service not found. 2026-03-07T10:13:09.302 INFO:teuthology.orchestra.run.vm07.stderr:Failed to stop ntpd.service: Unit ntpd.service not loaded. 2026-03-07T10:13:09.311 INFO:teuthology.orchestra.run.vm01.stderr:Failed to start ntpd.service: Unit ntpd.service not found. 2026-03-07T10:13:09.316 INFO:teuthology.orchestra.run.vm04.stderr:Failed to start ntpd.service: Unit ntpd.service not found. 2026-03-07T10:13:09.330 INFO:teuthology.orchestra.run.vm07.stderr:sudo: ntpd: command not found 2026-03-07T10:13:09.341 INFO:teuthology.orchestra.run.vm07.stdout:506 Cannot talk to daemon 2026-03-07T10:13:09.355 INFO:teuthology.orchestra.run.vm07.stderr:Failed to start ntp.service: Unit ntp.service not found. 
2026-03-07T10:13:09.361 INFO:teuthology.orchestra.run.vm01.stderr:bash: line 1: ntpq: command not found 2026-03-07T10:13:09.363 INFO:teuthology.orchestra.run.vm01.stdout:MS Name/IP address Stratum Poll Reach LastRx Last sample 2026-03-07T10:13:09.363 INFO:teuthology.orchestra.run.vm01.stdout:=============================================================================== 2026-03-07T10:13:09.368 INFO:teuthology.orchestra.run.vm04.stderr:bash: line 1: ntpq: command not found 2026-03-07T10:13:09.370 INFO:teuthology.orchestra.run.vm07.stderr:Failed to start ntpd.service: Unit ntpd.service not found. 2026-03-07T10:13:09.370 INFO:teuthology.orchestra.run.vm04.stdout:MS Name/IP address Stratum Poll Reach LastRx Last sample 2026-03-07T10:13:09.370 INFO:teuthology.orchestra.run.vm04.stdout:=============================================================================== 2026-03-07T10:13:09.411 INFO:teuthology.orchestra.run.vm07.stderr:bash: line 1: ntpq: command not found 2026-03-07T10:13:09.413 INFO:teuthology.orchestra.run.vm07.stdout:MS Name/IP address Stratum Poll Reach LastRx Last sample 2026-03-07T10:13:09.413 INFO:teuthology.orchestra.run.vm07.stdout:=============================================================================== 2026-03-07T10:13:09.414 INFO:teuthology.run_tasks:Running task pexec... 2026-03-07T10:13:09.416 INFO:teuthology.task.pexec:Executing custom commands... 2026-03-07T10:13:09.416 DEBUG:teuthology.orchestra.run.vm01:> TESTDIR=/home/ubuntu/cephtest bash -s 2026-03-07T10:13:09.416 DEBUG:teuthology.orchestra.run.vm04:> TESTDIR=/home/ubuntu/cephtest bash -s 2026-03-07T10:13:09.416 DEBUG:teuthology.orchestra.run.vm07:> TESTDIR=/home/ubuntu/cephtest bash -s 2026-03-07T10:13:09.418 DEBUG:teuthology.task.pexec:ubuntu@vm01.local< sudo dnf remove nvme-cli -y 2026-03-07T10:13:09.418 DEBUG:teuthology.task.pexec:ubuntu@vm01.local< sudo dnf install nvmetcli nvme-cli -y 2026-03-07T10:13:09.418 INFO:teuthology.task.pexec:Running commands on host ubuntu@vm01.local 2026-03-07T10:13:09.418 INFO:teuthology.task.pexec:sudo dnf remove nvme-cli -y 2026-03-07T10:13:09.418 INFO:teuthology.task.pexec:sudo dnf install nvmetcli nvme-cli -y 2026-03-07T10:13:09.418 DEBUG:teuthology.task.pexec:ubuntu@vm04.local< sudo dnf remove nvme-cli -y 2026-03-07T10:13:09.418 DEBUG:teuthology.task.pexec:ubuntu@vm04.local< sudo dnf install nvmetcli nvme-cli -y 2026-03-07T10:13:09.418 INFO:teuthology.task.pexec:Running commands on host ubuntu@vm04.local 2026-03-07T10:13:09.418 INFO:teuthology.task.pexec:sudo dnf remove nvme-cli -y 2026-03-07T10:13:09.418 INFO:teuthology.task.pexec:sudo dnf install nvmetcli nvme-cli -y 2026-03-07T10:13:09.455 DEBUG:teuthology.task.pexec:ubuntu@vm07.local< sudo dnf remove nvme-cli -y 2026-03-07T10:13:09.455 DEBUG:teuthology.task.pexec:ubuntu@vm07.local< sudo dnf install nvmetcli nvme-cli -y 2026-03-07T10:13:09.455 INFO:teuthology.task.pexec:Running commands on host ubuntu@vm07.local 2026-03-07T10:13:09.456 INFO:teuthology.task.pexec:sudo dnf remove nvme-cli -y 2026-03-07T10:13:09.456 INFO:teuthology.task.pexec:sudo dnf install nvmetcli nvme-cli -y 2026-03-07T10:13:09.599 INFO:teuthology.orchestra.run.vm01.stdout:No match for argument: nvme-cli 2026-03-07T10:13:09.599 INFO:teuthology.orchestra.run.vm01.stderr:No packages marked for removal. 2026-03-07T10:13:09.602 INFO:teuthology.orchestra.run.vm04.stdout:No match for argument: nvme-cli 2026-03-07T10:13:09.602 INFO:teuthology.orchestra.run.vm04.stderr:No packages marked for removal. 
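On these CentOS Stream 9 nodes neither ntpd nor ntpq is installed, so the clock task's fallback chain above ends up using chrony; the "506 Cannot talk to daemon" lines appear to come from running chronyc makestep immediately after chronyd was stopped. An equivalent manual resync and skew check using chrony alone would be:
    # resync and inspect skew with chrony only (chronyd must be running for chronyc to talk to it)
    sudo systemctl restart chronyd
    sudo chronyc makestep
    chronyc sources
    chronyc tracking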
2026-03-07T10:13:09.602 INFO:teuthology.orchestra.run.vm01.stdout:Dependencies resolved. 2026-03-07T10:13:09.602 INFO:teuthology.orchestra.run.vm01.stdout:Nothing to do. 2026-03-07T10:13:09.602 INFO:teuthology.orchestra.run.vm01.stdout:Complete! 2026-03-07T10:13:09.605 INFO:teuthology.orchestra.run.vm04.stdout:Dependencies resolved. 2026-03-07T10:13:09.605 INFO:teuthology.orchestra.run.vm04.stdout:Nothing to do. 2026-03-07T10:13:09.605 INFO:teuthology.orchestra.run.vm04.stdout:Complete! 2026-03-07T10:13:09.651 INFO:teuthology.orchestra.run.vm07.stdout:No match for argument: nvme-cli 2026-03-07T10:13:09.651 INFO:teuthology.orchestra.run.vm07.stderr:No packages marked for removal. 2026-03-07T10:13:09.654 INFO:teuthology.orchestra.run.vm07.stdout:Dependencies resolved. 2026-03-07T10:13:09.654 INFO:teuthology.orchestra.run.vm07.stdout:Nothing to do. 2026-03-07T10:13:09.654 INFO:teuthology.orchestra.run.vm07.stdout:Complete! 2026-03-07T10:13:09.960 INFO:teuthology.orchestra.run.vm01.stdout:Last metadata expiration check: 0:06:01 ago on Sat 07 Mar 2026 10:07:08 AM UTC. 2026-03-07T10:13:09.962 INFO:teuthology.orchestra.run.vm04.stdout:Last metadata expiration check: 0:05:59 ago on Sat 07 Mar 2026 10:07:10 AM UTC. 2026-03-07T10:13:10.034 INFO:teuthology.orchestra.run.vm07.stdout:Last metadata expiration check: 0:00:57 ago on Sat 07 Mar 2026 10:12:13 AM UTC. 2026-03-07T10:13:10.053 INFO:teuthology.orchestra.run.vm01.stdout:Dependencies resolved. 2026-03-07T10:13:10.053 INFO:teuthology.orchestra.run.vm01.stdout:================================================================================ 2026-03-07T10:13:10.053 INFO:teuthology.orchestra.run.vm01.stdout: Package Architecture Version Repository Size 2026-03-07T10:13:10.053 INFO:teuthology.orchestra.run.vm01.stdout:================================================================================ 2026-03-07T10:13:10.053 INFO:teuthology.orchestra.run.vm01.stdout:Installing: 2026-03-07T10:13:10.053 INFO:teuthology.orchestra.run.vm01.stdout: nvme-cli x86_64 2.16-1.el9 baseos 1.2 M 2026-03-07T10:13:10.053 INFO:teuthology.orchestra.run.vm01.stdout: nvmetcli noarch 0.8-3.el9 baseos 44 k 2026-03-07T10:13:10.053 INFO:teuthology.orchestra.run.vm01.stdout:Installing dependencies: 2026-03-07T10:13:10.053 INFO:teuthology.orchestra.run.vm01.stdout: python3-configshell noarch 1:1.1.30-1.el9 baseos 72 k 2026-03-07T10:13:10.053 INFO:teuthology.orchestra.run.vm01.stdout: python3-kmod x86_64 0.9-32.el9 baseos 84 k 2026-03-07T10:13:10.053 INFO:teuthology.orchestra.run.vm01.stdout: python3-pyparsing noarch 2.4.7-9.el9 baseos 150 k 2026-03-07T10:13:10.053 INFO:teuthology.orchestra.run.vm01.stdout: python3-urwid x86_64 2.1.2-4.el9 baseos 837 k 2026-03-07T10:13:10.053 INFO:teuthology.orchestra.run.vm01.stdout: 2026-03-07T10:13:10.053 INFO:teuthology.orchestra.run.vm01.stdout:Transaction Summary 2026-03-07T10:13:10.053 INFO:teuthology.orchestra.run.vm01.stdout:================================================================================ 2026-03-07T10:13:10.053 INFO:teuthology.orchestra.run.vm01.stdout:Install 6 Packages 2026-03-07T10:13:10.053 INFO:teuthology.orchestra.run.vm01.stdout: 2026-03-07T10:13:10.053 INFO:teuthology.orchestra.run.vm04.stdout:Dependencies resolved. 
2026-03-07T10:13:10.053 INFO:teuthology.orchestra.run.vm01.stdout:Total download size: 2.3 M 2026-03-07T10:13:10.053 INFO:teuthology.orchestra.run.vm01.stdout:Installed size: 11 M 2026-03-07T10:13:10.054 INFO:teuthology.orchestra.run.vm01.stdout:Downloading Packages: 2026-03-07T10:13:10.054 INFO:teuthology.orchestra.run.vm04.stdout:================================================================================ 2026-03-07T10:13:10.054 INFO:teuthology.orchestra.run.vm04.stdout: Package Architecture Version Repository Size 2026-03-07T10:13:10.054 INFO:teuthology.orchestra.run.vm04.stdout:================================================================================ 2026-03-07T10:13:10.054 INFO:teuthology.orchestra.run.vm04.stdout:Installing: 2026-03-07T10:13:10.054 INFO:teuthology.orchestra.run.vm04.stdout: nvme-cli x86_64 2.16-1.el9 baseos 1.2 M 2026-03-07T10:13:10.054 INFO:teuthology.orchestra.run.vm04.stdout: nvmetcli noarch 0.8-3.el9 baseos 44 k 2026-03-07T10:13:10.054 INFO:teuthology.orchestra.run.vm04.stdout:Installing dependencies: 2026-03-07T10:13:10.054 INFO:teuthology.orchestra.run.vm04.stdout: python3-configshell noarch 1:1.1.30-1.el9 baseos 72 k 2026-03-07T10:13:10.054 INFO:teuthology.orchestra.run.vm04.stdout: python3-kmod x86_64 0.9-32.el9 baseos 84 k 2026-03-07T10:13:10.054 INFO:teuthology.orchestra.run.vm04.stdout: python3-pyparsing noarch 2.4.7-9.el9 baseos 150 k 2026-03-07T10:13:10.054 INFO:teuthology.orchestra.run.vm04.stdout: python3-urwid x86_64 2.1.2-4.el9 baseos 837 k 2026-03-07T10:13:10.054 INFO:teuthology.orchestra.run.vm04.stdout: 2026-03-07T10:13:10.054 INFO:teuthology.orchestra.run.vm04.stdout:Transaction Summary 2026-03-07T10:13:10.054 INFO:teuthology.orchestra.run.vm04.stdout:================================================================================ 2026-03-07T10:13:10.054 INFO:teuthology.orchestra.run.vm04.stdout:Install 6 Packages 2026-03-07T10:13:10.054 INFO:teuthology.orchestra.run.vm04.stdout: 2026-03-07T10:13:10.054 INFO:teuthology.orchestra.run.vm04.stdout:Total download size: 2.3 M 2026-03-07T10:13:10.054 INFO:teuthology.orchestra.run.vm04.stdout:Installed size: 11 M 2026-03-07T10:13:10.054 INFO:teuthology.orchestra.run.vm04.stdout:Downloading Packages: 2026-03-07T10:13:10.122 INFO:teuthology.orchestra.run.vm07.stdout:Dependencies resolved. 
2026-03-07T10:13:10.123 INFO:teuthology.orchestra.run.vm07.stdout:================================================================================ 2026-03-07T10:13:10.123 INFO:teuthology.orchestra.run.vm07.stdout: Package Architecture Version Repository Size 2026-03-07T10:13:10.123 INFO:teuthology.orchestra.run.vm07.stdout:================================================================================ 2026-03-07T10:13:10.123 INFO:teuthology.orchestra.run.vm07.stdout:Installing: 2026-03-07T10:13:10.123 INFO:teuthology.orchestra.run.vm07.stdout: nvme-cli x86_64 2.16-1.el9 baseos 1.2 M 2026-03-07T10:13:10.123 INFO:teuthology.orchestra.run.vm07.stdout: nvmetcli noarch 0.8-3.el9 baseos 44 k 2026-03-07T10:13:10.123 INFO:teuthology.orchestra.run.vm07.stdout:Installing dependencies: 2026-03-07T10:13:10.123 INFO:teuthology.orchestra.run.vm07.stdout: python3-configshell noarch 1:1.1.30-1.el9 baseos 72 k 2026-03-07T10:13:10.123 INFO:teuthology.orchestra.run.vm07.stdout: python3-kmod x86_64 0.9-32.el9 baseos 84 k 2026-03-07T10:13:10.123 INFO:teuthology.orchestra.run.vm07.stdout: python3-pyparsing noarch 2.4.7-9.el9 baseos 150 k 2026-03-07T10:13:10.123 INFO:teuthology.orchestra.run.vm07.stdout: python3-urwid x86_64 2.1.2-4.el9 baseos 837 k 2026-03-07T10:13:10.123 INFO:teuthology.orchestra.run.vm07.stdout: 2026-03-07T10:13:10.123 INFO:teuthology.orchestra.run.vm07.stdout:Transaction Summary 2026-03-07T10:13:10.123 INFO:teuthology.orchestra.run.vm07.stdout:================================================================================ 2026-03-07T10:13:10.123 INFO:teuthology.orchestra.run.vm07.stdout:Install 6 Packages 2026-03-07T10:13:10.123 INFO:teuthology.orchestra.run.vm07.stdout: 2026-03-07T10:13:10.123 INFO:teuthology.orchestra.run.vm07.stdout:Total download size: 2.3 M 2026-03-07T10:13:10.123 INFO:teuthology.orchestra.run.vm07.stdout:Installed size: 11 M 2026-03-07T10:13:10.123 INFO:teuthology.orchestra.run.vm07.stdout:Downloading Packages: 2026-03-07T10:13:10.386 INFO:teuthology.orchestra.run.vm07.stdout:(1/6): nvmetcli-0.8-3.el9.noarch.rpm 276 kB/s | 44 kB 00:00 2026-03-07T10:13:10.417 INFO:teuthology.orchestra.run.vm07.stdout:(2/6): python3-configshell-1.1.30-1.el9.noarch. 379 kB/s | 72 kB 00:00 2026-03-07T10:13:10.494 INFO:teuthology.orchestra.run.vm07.stdout:(3/6): python3-kmod-0.9-32.el9.x86_64.rpm 782 kB/s | 84 kB 00:00 2026-03-07T10:13:10.521 INFO:teuthology.orchestra.run.vm07.stdout:(4/6): python3-pyparsing-2.4.7-9.el9.noarch.rpm 1.4 MB/s | 150 kB 00:00 2026-03-07T10:13:10.777 INFO:teuthology.orchestra.run.vm01.stdout:(1/6): python3-configshell-1.1.30-1.el9.noarch. 
376 kB/s | 72 kB 00:00 2026-03-07T10:13:10.778 INFO:teuthology.orchestra.run.vm01.stdout:(2/6): nvmetcli-0.8-3.el9.noarch.rpm 228 kB/s | 44 kB 00:00 2026-03-07T10:13:10.805 INFO:teuthology.orchestra.run.vm07.stdout:(5/6): nvme-cli-2.16-1.el9.x86_64.rpm 2.0 MB/s | 1.2 MB 00:00 2026-03-07T10:13:10.824 INFO:teuthology.orchestra.run.vm07.stdout:(6/6): python3-urwid-2.1.2-4.el9.x86_64.rpm 2.5 MB/s | 837 kB 00:00 2026-03-07T10:13:10.824 INFO:teuthology.orchestra.run.vm07.stdout:-------------------------------------------------------------------------------- 2026-03-07T10:13:10.824 INFO:teuthology.orchestra.run.vm07.stdout:Total 3.3 MB/s | 2.3 MB 00:00 2026-03-07T10:13:10.874 INFO:teuthology.orchestra.run.vm01.stdout:(3/6): python3-kmod-0.9-32.el9.x86_64.rpm 871 kB/s | 84 kB 00:00 2026-03-07T10:13:10.875 INFO:teuthology.orchestra.run.vm07.stdout:Running transaction check 2026-03-07T10:13:10.875 INFO:teuthology.orchestra.run.vm01.stdout:(4/6): python3-pyparsing-2.4.7-9.el9.noarch.rpm 1.5 MB/s | 150 kB 00:00 2026-03-07T10:13:10.881 INFO:teuthology.orchestra.run.vm07.stdout:Transaction check succeeded. 2026-03-07T10:13:10.882 INFO:teuthology.orchestra.run.vm07.stdout:Running transaction test 2026-03-07T10:13:10.928 INFO:teuthology.orchestra.run.vm07.stdout:Transaction test succeeded. 2026-03-07T10:13:10.928 INFO:teuthology.orchestra.run.vm07.stdout:Running transaction 2026-03-07T10:13:10.970 INFO:teuthology.orchestra.run.vm01.stdout:(5/6): nvme-cli-2.16-1.el9.x86_64.rpm 3.0 MB/s | 1.2 MB 00:00 2026-03-07T10:13:10.984 INFO:teuthology.orchestra.run.vm04.stdout:(1/6): python3-configshell-1.1.30-1.el9.noarch. 181 kB/s | 72 kB 00:00 2026-03-07T10:13:11.022 INFO:teuthology.orchestra.run.vm01.stdout:(6/6): python3-urwid-2.1.2-4.el9.x86_64.rpm 5.6 MB/s | 837 kB 00:00 2026-03-07T10:13:11.022 INFO:teuthology.orchestra.run.vm01.stdout:-------------------------------------------------------------------------------- 2026-03-07T10:13:11.022 INFO:teuthology.orchestra.run.vm01.stdout:Total 2.4 MB/s | 2.3 MB 00:00 2026-03-07T10:13:11.073 INFO:teuthology.orchestra.run.vm07.stdout: Preparing : 1/1 2026-03-07T10:13:11.077 INFO:teuthology.orchestra.run.vm01.stdout:Running transaction check 2026-03-07T10:13:11.084 INFO:teuthology.orchestra.run.vm07.stdout: Installing : python3-urwid-2.1.2-4.el9.x86_64 1/6 2026-03-07T10:13:11.085 INFO:teuthology.orchestra.run.vm01.stdout:Transaction check succeeded. 2026-03-07T10:13:11.085 INFO:teuthology.orchestra.run.vm01.stdout:Running transaction test 2026-03-07T10:13:11.095 INFO:teuthology.orchestra.run.vm07.stdout: Installing : python3-pyparsing-2.4.7-9.el9.noarch 2/6 2026-03-07T10:13:11.103 INFO:teuthology.orchestra.run.vm07.stdout: Installing : python3-configshell-1:1.1.30-1.el9.noarch 3/6 2026-03-07T10:13:11.111 INFO:teuthology.orchestra.run.vm07.stdout: Installing : python3-kmod-0.9-32.el9.x86_64 4/6 2026-03-07T10:13:11.113 INFO:teuthology.orchestra.run.vm07.stdout: Installing : nvmetcli-0.8-3.el9.noarch 5/6 2026-03-07T10:13:11.131 INFO:teuthology.orchestra.run.vm01.stdout:Transaction test succeeded. 
2026-03-07T10:13:11.131 INFO:teuthology.orchestra.run.vm01.stdout:Running transaction 2026-03-07T10:13:11.261 INFO:teuthology.orchestra.run.vm07.stdout: Running scriptlet: nvmetcli-0.8-3.el9.noarch 5/6 2026-03-07T10:13:11.266 INFO:teuthology.orchestra.run.vm07.stdout: Installing : nvme-cli-2.16-1.el9.x86_64 6/6 2026-03-07T10:13:11.269 INFO:teuthology.orchestra.run.vm01.stdout: Preparing : 1/1 2026-03-07T10:13:11.279 INFO:teuthology.orchestra.run.vm01.stdout: Installing : python3-urwid-2.1.2-4.el9.x86_64 1/6 2026-03-07T10:13:11.290 INFO:teuthology.orchestra.run.vm01.stdout: Installing : python3-pyparsing-2.4.7-9.el9.noarch 2/6 2026-03-07T10:13:11.297 INFO:teuthology.orchestra.run.vm01.stdout: Installing : python3-configshell-1:1.1.30-1.el9.noarch 3/6 2026-03-07T10:13:11.304 INFO:teuthology.orchestra.run.vm01.stdout: Installing : python3-kmod-0.9-32.el9.x86_64 4/6 2026-03-07T10:13:11.306 INFO:teuthology.orchestra.run.vm01.stdout: Installing : nvmetcli-0.8-3.el9.noarch 5/6 2026-03-07T10:13:11.362 INFO:teuthology.orchestra.run.vm04.stdout:(2/6): nvmetcli-0.8-3.el9.noarch.rpm 56 kB/s | 44 kB 00:00 2026-03-07T10:13:11.425 INFO:teuthology.orchestra.run.vm04.stdout:(3/6): python3-kmod-0.9-32.el9.x86_64.rpm 191 kB/s | 84 kB 00:00 2026-03-07T10:13:11.430 INFO:teuthology.orchestra.run.vm04.stdout:(4/6): nvme-cli-2.16-1.el9.x86_64.rpm 1.4 MB/s | 1.2 MB 00:00 2026-03-07T10:13:11.435 INFO:teuthology.orchestra.run.vm04.stdout:(5/6): python3-pyparsing-2.4.7-9.el9.noarch.rpm 2.0 MB/s | 150 kB 00:00 2026-03-07T10:13:11.446 INFO:teuthology.orchestra.run.vm01.stdout: Running scriptlet: nvmetcli-0.8-3.el9.noarch 5/6 2026-03-07T10:13:11.450 INFO:teuthology.orchestra.run.vm01.stdout: Installing : nvme-cli-2.16-1.el9.x86_64 6/6 2026-03-07T10:13:11.517 INFO:teuthology.orchestra.run.vm04.stdout:(6/6): python3-urwid-2.1.2-4.el9.x86_64.rpm 8.9 MB/s | 837 kB 00:00 2026-03-07T10:13:11.518 INFO:teuthology.orchestra.run.vm04.stdout:-------------------------------------------------------------------------------- 2026-03-07T10:13:11.518 INFO:teuthology.orchestra.run.vm04.stdout:Total 1.6 MB/s | 2.3 MB 00:01 2026-03-07T10:13:11.575 INFO:teuthology.orchestra.run.vm04.stdout:Running transaction check 2026-03-07T10:13:11.583 INFO:teuthology.orchestra.run.vm04.stdout:Transaction check succeeded. 2026-03-07T10:13:11.583 INFO:teuthology.orchestra.run.vm04.stdout:Running transaction test 2026-03-07T10:13:11.600 INFO:teuthology.orchestra.run.vm07.stdout: Running scriptlet: nvme-cli-2.16-1.el9.x86_64 6/6 2026-03-07T10:13:11.600 INFO:teuthology.orchestra.run.vm07.stdout:Created symlink /etc/systemd/system/default.target.wants/nvmefc-boot-connections.service → /usr/lib/systemd/system/nvmefc-boot-connections.service. 2026-03-07T10:13:11.600 INFO:teuthology.orchestra.run.vm07.stdout: 2026-03-07T10:13:11.630 INFO:teuthology.orchestra.run.vm04.stdout:Transaction test succeeded. 2026-03-07T10:13:11.630 INFO:teuthology.orchestra.run.vm04.stdout:Running transaction 2026-03-07T10:13:11.739 INFO:teuthology.orchestra.run.vm01.stdout: Running scriptlet: nvme-cli-2.16-1.el9.x86_64 6/6 2026-03-07T10:13:11.739 INFO:teuthology.orchestra.run.vm01.stdout:Created symlink /etc/systemd/system/default.target.wants/nvmefc-boot-connections.service → /usr/lib/systemd/system/nvmefc-boot-connections.service. 
2026-03-07T10:13:11.739 INFO:teuthology.orchestra.run.vm01.stdout: 2026-03-07T10:13:11.768 INFO:teuthology.orchestra.run.vm04.stdout: Preparing : 1/1 2026-03-07T10:13:11.778 INFO:teuthology.orchestra.run.vm04.stdout: Installing : python3-urwid-2.1.2-4.el9.x86_64 1/6 2026-03-07T10:13:11.788 INFO:teuthology.orchestra.run.vm04.stdout: Installing : python3-pyparsing-2.4.7-9.el9.noarch 2/6 2026-03-07T10:13:11.795 INFO:teuthology.orchestra.run.vm04.stdout: Installing : python3-configshell-1:1.1.30-1.el9.noarch 3/6 2026-03-07T10:13:11.802 INFO:teuthology.orchestra.run.vm04.stdout: Installing : python3-kmod-0.9-32.el9.x86_64 4/6 2026-03-07T10:13:11.804 INFO:teuthology.orchestra.run.vm04.stdout: Installing : nvmetcli-0.8-3.el9.noarch 5/6 2026-03-07T10:13:11.947 INFO:teuthology.orchestra.run.vm04.stdout: Running scriptlet: nvmetcli-0.8-3.el9.noarch 5/6 2026-03-07T10:13:11.952 INFO:teuthology.orchestra.run.vm04.stdout: Installing : nvme-cli-2.16-1.el9.x86_64 6/6 2026-03-07T10:13:12.171 INFO:teuthology.orchestra.run.vm01.stdout: Verifying : nvme-cli-2.16-1.el9.x86_64 1/6 2026-03-07T10:13:12.171 INFO:teuthology.orchestra.run.vm01.stdout: Verifying : nvmetcli-0.8-3.el9.noarch 2/6 2026-03-07T10:13:12.171 INFO:teuthology.orchestra.run.vm01.stdout: Verifying : python3-configshell-1:1.1.30-1.el9.noarch 3/6 2026-03-07T10:13:12.171 INFO:teuthology.orchestra.run.vm01.stdout: Verifying : python3-kmod-0.9-32.el9.x86_64 4/6 2026-03-07T10:13:12.171 INFO:teuthology.orchestra.run.vm01.stdout: Verifying : python3-pyparsing-2.4.7-9.el9.noarch 5/6 2026-03-07T10:13:12.194 INFO:teuthology.orchestra.run.vm07.stdout: Verifying : nvme-cli-2.16-1.el9.x86_64 1/6 2026-03-07T10:13:12.194 INFO:teuthology.orchestra.run.vm07.stdout: Verifying : nvmetcli-0.8-3.el9.noarch 2/6 2026-03-07T10:13:12.194 INFO:teuthology.orchestra.run.vm07.stdout: Verifying : python3-configshell-1:1.1.30-1.el9.noarch 3/6 2026-03-07T10:13:12.194 INFO:teuthology.orchestra.run.vm07.stdout: Verifying : python3-kmod-0.9-32.el9.x86_64 4/6 2026-03-07T10:13:12.194 INFO:teuthology.orchestra.run.vm07.stdout: Verifying : python3-pyparsing-2.4.7-9.el9.noarch 5/6 2026-03-07T10:13:12.232 INFO:teuthology.orchestra.run.vm01.stdout: Verifying : python3-urwid-2.1.2-4.el9.x86_64 6/6 2026-03-07T10:13:12.232 INFO:teuthology.orchestra.run.vm01.stdout: 2026-03-07T10:13:12.232 INFO:teuthology.orchestra.run.vm01.stdout:Installed: 2026-03-07T10:13:12.232 INFO:teuthology.orchestra.run.vm01.stdout: nvme-cli-2.16-1.el9.x86_64 nvmetcli-0.8-3.el9.noarch 2026-03-07T10:13:12.232 INFO:teuthology.orchestra.run.vm01.stdout: python3-configshell-1:1.1.30-1.el9.noarch python3-kmod-0.9-32.el9.x86_64 2026-03-07T10:13:12.232 INFO:teuthology.orchestra.run.vm01.stdout: python3-pyparsing-2.4.7-9.el9.noarch python3-urwid-2.1.2-4.el9.x86_64 2026-03-07T10:13:12.232 INFO:teuthology.orchestra.run.vm01.stdout: 2026-03-07T10:13:12.232 INFO:teuthology.orchestra.run.vm01.stdout:Complete! 2026-03-07T10:13:12.261 INFO:teuthology.orchestra.run.vm04.stdout: Running scriptlet: nvme-cli-2.16-1.el9.x86_64 6/6 2026-03-07T10:13:12.261 INFO:teuthology.orchestra.run.vm04.stdout:Created symlink /etc/systemd/system/default.target.wants/nvmefc-boot-connections.service → /usr/lib/systemd/system/nvmefc-boot-connections.service. 
2026-03-07T10:13:12.261 INFO:teuthology.orchestra.run.vm04.stdout: 2026-03-07T10:13:12.274 DEBUG:teuthology.parallel:result is None 2026-03-07T10:13:12.298 INFO:teuthology.orchestra.run.vm07.stdout: Verifying : python3-urwid-2.1.2-4.el9.x86_64 6/6 2026-03-07T10:13:12.299 INFO:teuthology.orchestra.run.vm07.stdout: 2026-03-07T10:13:12.299 INFO:teuthology.orchestra.run.vm07.stdout:Installed: 2026-03-07T10:13:12.299 INFO:teuthology.orchestra.run.vm07.stdout: nvme-cli-2.16-1.el9.x86_64 nvmetcli-0.8-3.el9.noarch 2026-03-07T10:13:12.299 INFO:teuthology.orchestra.run.vm07.stdout: python3-configshell-1:1.1.30-1.el9.noarch python3-kmod-0.9-32.el9.x86_64 2026-03-07T10:13:12.299 INFO:teuthology.orchestra.run.vm07.stdout: python3-pyparsing-2.4.7-9.el9.noarch python3-urwid-2.1.2-4.el9.x86_64 2026-03-07T10:13:12.299 INFO:teuthology.orchestra.run.vm07.stdout: 2026-03-07T10:13:12.299 INFO:teuthology.orchestra.run.vm07.stdout:Complete! 2026-03-07T10:13:12.365 DEBUG:teuthology.parallel:result is None 2026-03-07T10:13:12.664 INFO:teuthology.orchestra.run.vm04.stdout: Verifying : nvme-cli-2.16-1.el9.x86_64 1/6 2026-03-07T10:13:12.664 INFO:teuthology.orchestra.run.vm04.stdout: Verifying : nvmetcli-0.8-3.el9.noarch 2/6 2026-03-07T10:13:12.664 INFO:teuthology.orchestra.run.vm04.stdout: Verifying : python3-configshell-1:1.1.30-1.el9.noarch 3/6 2026-03-07T10:13:12.664 INFO:teuthology.orchestra.run.vm04.stdout: Verifying : python3-kmod-0.9-32.el9.x86_64 4/6 2026-03-07T10:13:12.664 INFO:teuthology.orchestra.run.vm04.stdout: Verifying : python3-pyparsing-2.4.7-9.el9.noarch 5/6 2026-03-07T10:13:12.723 INFO:teuthology.orchestra.run.vm04.stdout: Verifying : python3-urwid-2.1.2-4.el9.x86_64 6/6 2026-03-07T10:13:12.723 INFO:teuthology.orchestra.run.vm04.stdout: 2026-03-07T10:13:12.723 INFO:teuthology.orchestra.run.vm04.stdout:Installed: 2026-03-07T10:13:12.723 INFO:teuthology.orchestra.run.vm04.stdout: nvme-cli-2.16-1.el9.x86_64 nvmetcli-0.8-3.el9.noarch 2026-03-07T10:13:12.723 INFO:teuthology.orchestra.run.vm04.stdout: python3-configshell-1:1.1.30-1.el9.noarch python3-kmod-0.9-32.el9.x86_64 2026-03-07T10:13:12.723 INFO:teuthology.orchestra.run.vm04.stdout: python3-pyparsing-2.4.7-9.el9.noarch python3-urwid-2.1.2-4.el9.x86_64 2026-03-07T10:13:12.723 INFO:teuthology.orchestra.run.vm04.stdout: 2026-03-07T10:13:12.723 INFO:teuthology.orchestra.run.vm04.stdout:Complete! 2026-03-07T10:13:12.763 DEBUG:teuthology.parallel:result is None 2026-03-07T10:13:12.763 INFO:teuthology.run_tasks:Running task cephadm... 
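All three nodes report the nvme-cli/nvmetcli transaction as Complete!; before the cephadm task starts, a quick manual confirmation that the pexec step succeeded would be:
    # verify the packages the pexec task just installed
    rpm -q nvme-cli nvmetcli
    nvme version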
2026-03-07T10:13:12.810 INFO:tasks.cephadm:Config: {'conf': {'global': {'mon election default strategy': 3}, 'mgr': {'debug mgr': 20, 'debug ms': 1, 'mgr/cephadm/use_agent': True}, 'mon': {'debug mon': 20, 'debug ms': 1, 'debug paxos': 20}, 'osd': {'debug ms': 1, 'debug osd': 20, 'osd mclock iops capacity threshold hdd': 49000}}, 'flavor': 'default', 'log-ignorelist': ['\\(MDS_ALL_DOWN\\)', '\\(MDS_UP_LESS_THAN_MAX\\)', 'MON_DOWN', 'mons down', 'mon down', 'out of quorum', 'CEPHADM_STRAY_HOST', 'CEPHADM_STRAY_DAEMON', 'CEPHADM_FAILED_DAEMON'], 'log-only-match': ['CEPHADM_'], 'sha1': '340d3c24fc6ae7529322dc7ccee6c6cb2589da0a', 'cephadm_binary_url': 'https://download.ceph.com/rpm-19.2.3/el9/noarch/cephadm', 'containers': {'image': 'harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5'}} 2026-03-07T10:13:12.810 INFO:tasks.cephadm:Provided image contains tag or digest, using it as is 2026-03-07T10:13:12.810 INFO:tasks.cephadm:Cluster image is harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 2026-03-07T10:13:12.810 INFO:tasks.cephadm:Cluster fsid is 3fd6e214-1a0e-11f1-b256-99cfc35f3328 2026-03-07T10:13:12.810 INFO:tasks.cephadm:Choosing monitor IPs and ports... 2026-03-07T10:13:12.810 INFO:tasks.cephadm:Monitor IPs: {'mon.a': '192.168.123.101', 'mon.b': '192.168.123.104', 'mon.c': '192.168.123.107'} 2026-03-07T10:13:12.810 INFO:tasks.cephadm:First mon is mon.a on vm01 2026-03-07T10:13:12.810 INFO:tasks.cephadm:First mgr is a 2026-03-07T10:13:12.810 INFO:tasks.cephadm:Normalizing hostnames... 2026-03-07T10:13:12.810 DEBUG:teuthology.orchestra.run.vm01:> sudo hostname $(hostname -s) 2026-03-07T10:13:12.842 DEBUG:teuthology.orchestra.run.vm04:> sudo hostname $(hostname -s) 2026-03-07T10:13:12.869 DEBUG:teuthology.orchestra.run.vm07:> sudo hostname $(hostname -s) 2026-03-07T10:13:12.905 INFO:tasks.cephadm:Downloading cephadm from url: https://download.ceph.com/rpm-19.2.3/el9/noarch/cephadm 2026-03-07T10:13:12.905 DEBUG:teuthology.orchestra.run.vm01:> curl --silent -L https://download.ceph.com/rpm-19.2.3/el9/noarch/cephadm > /home/ubuntu/cephtest/cephadm && ls -l /home/ubuntu/cephtest/cephadm 2026-03-07T10:13:13.964 INFO:teuthology.orchestra.run.vm01.stdout:-rw-r--r--. 1 ubuntu ubuntu 787672 Mar 7 10:13 /home/ubuntu/cephtest/cephadm 2026-03-07T10:13:13.964 DEBUG:teuthology.orchestra.run.vm04:> curl --silent -L https://download.ceph.com/rpm-19.2.3/el9/noarch/cephadm > /home/ubuntu/cephtest/cephadm && ls -l /home/ubuntu/cephtest/cephadm 2026-03-07T10:13:14.984 INFO:teuthology.orchestra.run.vm04.stdout:-rw-r--r--. 1 ubuntu ubuntu 787672 Mar 7 10:13 /home/ubuntu/cephtest/cephadm 2026-03-07T10:13:14.984 DEBUG:teuthology.orchestra.run.vm07:> curl --silent -L https://download.ceph.com/rpm-19.2.3/el9/noarch/cephadm > /home/ubuntu/cephtest/cephadm && ls -l /home/ubuntu/cephtest/cephadm 2026-03-07T10:13:16.077 INFO:teuthology.orchestra.run.vm07.stdout:-rw-r--r--. 
1 ubuntu ubuntu 787672 Mar 7 10:13 /home/ubuntu/cephtest/cephadm 2026-03-07T10:13:16.077 DEBUG:teuthology.orchestra.run.vm01:> test -s /home/ubuntu/cephtest/cephadm && test $(stat -c%s /home/ubuntu/cephtest/cephadm) -gt 1000 && chmod +x /home/ubuntu/cephtest/cephadm 2026-03-07T10:13:16.092 DEBUG:teuthology.orchestra.run.vm04:> test -s /home/ubuntu/cephtest/cephadm && test $(stat -c%s /home/ubuntu/cephtest/cephadm) -gt 1000 && chmod +x /home/ubuntu/cephtest/cephadm 2026-03-07T10:13:16.109 DEBUG:teuthology.orchestra.run.vm07:> test -s /home/ubuntu/cephtest/cephadm && test $(stat -c%s /home/ubuntu/cephtest/cephadm) -gt 1000 && chmod +x /home/ubuntu/cephtest/cephadm 2026-03-07T10:13:16.135 INFO:tasks.cephadm:Pulling image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 on all hosts... 2026-03-07T10:13:16.135 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 pull 2026-03-07T10:13:16.137 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 pull 2026-03-07T10:13:16.151 DEBUG:teuthology.orchestra.run.vm07:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 pull 2026-03-07T10:13:16.303 INFO:teuthology.orchestra.run.vm01.stderr:Pulling container image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5... 2026-03-07T10:13:16.331 INFO:teuthology.orchestra.run.vm04.stderr:Pulling container image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5... 2026-03-07T10:13:16.350 INFO:teuthology.orchestra.run.vm07.stderr:Pulling container image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5... 
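The standalone cephadm binary above is fetched with plain curl and only accepted if it is non-empty and larger than 1000 bytes before being marked executable. A minimal sketch of that fetch-and-sanity-check pattern, using the URL and path from the logged commands:

    CEPHADM_URL=https://download.ceph.com/rpm-19.2.3/el9/noarch/cephadm
    DEST=/home/ubuntu/cephtest/cephadm

    curl --silent -L "$CEPHADM_URL" > "$DEST"
    # reject an empty or truncated download before making the file executable
    test -s "$DEST" && test "$(stat -c%s "$DEST")" -gt 1000 && chmod +x "$DEST"

The size check presumably guards against a mirror returning a short error page instead of the binary, which a bare `test -s` alone would not catch.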
2026-03-07T10:13:46.125 INFO:teuthology.orchestra.run.vm07.stdout:{ 2026-03-07T10:13:46.125 INFO:teuthology.orchestra.run.vm07.stdout: "ceph_version": "ceph version 19.2.3-39-g340d3c24fc6 (340d3c24fc6ae7529322dc7ccee6c6cb2589da0a) squid (stable)", 2026-03-07T10:13:46.125 INFO:teuthology.orchestra.run.vm07.stdout: "image_id": "8bccc98d839aa18345ec1336292d0452ca331737e49f12524f635044dcabcfe1", 2026-03-07T10:13:46.125 INFO:teuthology.orchestra.run.vm07.stdout: "repo_digests": [ 2026-03-07T10:13:46.125 INFO:teuthology.orchestra.run.vm07.stdout: "harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0" 2026-03-07T10:13:46.125 INFO:teuthology.orchestra.run.vm07.stdout: ] 2026-03-07T10:13:46.125 INFO:teuthology.orchestra.run.vm07.stdout:} 2026-03-07T10:13:46.134 INFO:teuthology.orchestra.run.vm04.stdout:{ 2026-03-07T10:13:46.134 INFO:teuthology.orchestra.run.vm04.stdout: "ceph_version": "ceph version 19.2.3-39-g340d3c24fc6 (340d3c24fc6ae7529322dc7ccee6c6cb2589da0a) squid (stable)", 2026-03-07T10:13:46.134 INFO:teuthology.orchestra.run.vm04.stdout: "image_id": "8bccc98d839aa18345ec1336292d0452ca331737e49f12524f635044dcabcfe1", 2026-03-07T10:13:46.134 INFO:teuthology.orchestra.run.vm04.stdout: "repo_digests": [ 2026-03-07T10:13:46.135 INFO:teuthology.orchestra.run.vm04.stdout: "harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0" 2026-03-07T10:13:46.135 INFO:teuthology.orchestra.run.vm04.stdout: ] 2026-03-07T10:13:46.135 INFO:teuthology.orchestra.run.vm04.stdout:} 2026-03-07T10:13:46.265 INFO:teuthology.orchestra.run.vm01.stdout:{ 2026-03-07T10:13:46.265 INFO:teuthology.orchestra.run.vm01.stdout: "ceph_version": "ceph version 19.2.3-39-g340d3c24fc6 (340d3c24fc6ae7529322dc7ccee6c6cb2589da0a) squid (stable)", 2026-03-07T10:13:46.266 INFO:teuthology.orchestra.run.vm01.stdout: "image_id": "8bccc98d839aa18345ec1336292d0452ca331737e49f12524f635044dcabcfe1", 2026-03-07T10:13:46.266 INFO:teuthology.orchestra.run.vm01.stdout: "repo_digests": [ 2026-03-07T10:13:46.266 INFO:teuthology.orchestra.run.vm01.stdout: "harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0" 2026-03-07T10:13:46.266 INFO:teuthology.orchestra.run.vm01.stdout: ] 2026-03-07T10:13:46.266 INFO:teuthology.orchestra.run.vm01.stdout:} 2026-03-07T10:13:46.282 DEBUG:teuthology.orchestra.run.vm01:> sudo mkdir -p /etc/ceph 2026-03-07T10:13:46.307 DEBUG:teuthology.orchestra.run.vm04:> sudo mkdir -p /etc/ceph 2026-03-07T10:13:46.336 DEBUG:teuthology.orchestra.run.vm07:> sudo mkdir -p /etc/ceph 2026-03-07T10:13:46.361 DEBUG:teuthology.orchestra.run.vm01:> sudo chmod 777 /etc/ceph 2026-03-07T10:13:46.384 DEBUG:teuthology.orchestra.run.vm04:> sudo chmod 777 /etc/ceph 2026-03-07T10:13:46.408 DEBUG:teuthology.orchestra.run.vm07:> sudo chmod 777 /etc/ceph 2026-03-07T10:13:46.431 INFO:tasks.cephadm:Writing seed config... 
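Each host reports the pulled image's id and repo digest as JSON on stdout, which makes it easy to pin the run to one exact build. A hedged sketch of such a check, assuming jq is available on the host and using the digest printed above as the expected value:

    IMAGE=harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5
    EXPECTED=sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0

    # `cephadm ... pull` prints the JSON shown above; keep only the digest part
    ACTUAL=$(sudo /home/ubuntu/cephtest/cephadm --image "$IMAGE" pull \
               | jq -r '.repo_digests[0]' | cut -d@ -f2)
    [ "$ACTUAL" = "$EXPECTED" ] || echo "unexpected image digest: $ACTUAL"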
2026-03-07T10:13:46.432 INFO:tasks.cephadm: override: [global] mon election default strategy = 3 2026-03-07T10:13:46.432 INFO:tasks.cephadm: override: [mgr] debug mgr = 20 2026-03-07T10:13:46.432 INFO:tasks.cephadm: override: [mgr] debug ms = 1 2026-03-07T10:13:46.432 INFO:tasks.cephadm: override: [mgr] mgr/cephadm/use_agent = True 2026-03-07T10:13:46.432 INFO:tasks.cephadm: override: [mon] debug mon = 20 2026-03-07T10:13:46.432 INFO:tasks.cephadm: override: [mon] debug ms = 1 2026-03-07T10:13:46.432 INFO:tasks.cephadm: override: [mon] debug paxos = 20 2026-03-07T10:13:46.432 INFO:tasks.cephadm: override: [osd] debug ms = 1 2026-03-07T10:13:46.432 INFO:tasks.cephadm: override: [osd] debug osd = 20 2026-03-07T10:13:46.432 INFO:tasks.cephadm: override: [osd] osd mclock iops capacity threshold hdd = 49000 2026-03-07T10:13:46.432 DEBUG:teuthology.orchestra.run.vm01:> set -ex 2026-03-07T10:13:46.432 DEBUG:teuthology.orchestra.run.vm01:> dd of=/home/ubuntu/cephtest/seed.ceph.conf 2026-03-07T10:13:46.447 DEBUG:tasks.cephadm:Final config: [global] # make logging friendly to teuthology log_to_file = true log_to_stderr = false log to journald = false mon cluster log to file = true mon cluster log file level = debug mon clock drift allowed = 1.000 # replicate across OSDs, not hosts osd crush chooseleaf type = 0 #osd pool default size = 2 osd pool default erasure code profile = plugin=jerasure technique=reed_sol_van k=2 m=1 crush-failure-domain=osd # enable some debugging auth debug = true ms die on old message = true ms die on bug = true debug asserts on shutdown = true # adjust warnings mon max pg per osd = 10000# >= luminous mon pg warn max object skew = 0 mon osd allow primary affinity = true mon osd allow pg remap = true mon warn on legacy crush tunables = false mon warn on crush straw calc version zero = false mon warn on no sortbitwise = false mon warn on osd down out interval zero = false mon warn on too few osds = false mon_warn_on_pool_pg_num_not_power_of_two = false # disable pg_autoscaler by default for new pools osd_pool_default_pg_autoscale_mode = off # tests delete pools mon allow pool delete = true fsid = 3fd6e214-1a0e-11f1-b256-99cfc35f3328 mon election default strategy = 3 [osd] osd scrub load threshold = 5.0 osd scrub max interval = 600 osd mclock profile = high_recovery_ops osd recover clone overlap = true osd recovery max chunk = 1048576 osd deep scrub update digest min age = 30 osd map max advance = 10 osd memory target autotune = true # debugging osd debug shutdown = true osd debug op order = true osd debug verify stray on activate = true osd debug pg log writeout = true osd debug verify cached snaps = true osd debug verify missing on start = true osd debug misdirected ops = true osd op queue = debug_random osd op queue cut off = debug_random osd shutdown pgref assert = true bdev debug aio = true osd sloppy crc = true debug ms = 1 debug osd = 20 osd mclock iops capacity threshold hdd = 49000 [mgr] mon reweight min pgs per osd = 4 mon reweight min bytes per osd = 10 mgr/telemetry/nag = false debug mgr = 20 debug ms = 1 mgr/cephadm/use_agent = True [mon] mon data avail warn = 5 mon mgr mkfs grace = 240 mon reweight min pgs per osd = 4 mon osd reporter subtree level = osd mon osd prime pg temp = true mon reweight min bytes per osd = 10 # rotate auth tickets quickly to exercise renewal paths auth mon ticket ttl = 660# 11m auth service ticket ttl = 240# 4m # don't complain about global id reclaim mon_warn_on_insecure_global_id_reclaim = false 
mon_warn_on_insecure_global_id_reclaim_allowed = false debug mon = 20 debug ms = 1 debug paxos = 20 [client.rgw] rgw cache enabled = true rgw enable ops log = true rgw enable usage log = true 2026-03-07T10:13:46.447 DEBUG:teuthology.orchestra.run.vm01:mon.a> sudo journalctl -f -n 0 -u ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328@mon.a.service 2026-03-07T10:13:46.489 DEBUG:teuthology.orchestra.run.vm01:mgr.a> sudo journalctl -f -n 0 -u ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328@mgr.a.service 2026-03-07T10:13:46.531 INFO:tasks.cephadm:Bootstrapping... 2026-03-07T10:13:46.531 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 -v bootstrap --fsid 3fd6e214-1a0e-11f1-b256-99cfc35f3328 --config /home/ubuntu/cephtest/seed.ceph.conf --output-config /etc/ceph/ceph.conf --output-keyring /etc/ceph/ceph.client.admin.keyring --output-pub-ssh-key /home/ubuntu/cephtest/ceph.pub --mon-id a --mgr-id a --orphan-initial-daemons --skip-monitoring-stack --mon-ip 192.168.123.101 --skip-admin-label && sudo chmod +r /etc/ceph/ceph.client.admin.keyring 2026-03-07T10:13:46.668 INFO:teuthology.orchestra.run.vm01.stdout:-------------------------------------------------------------------------------- 2026-03-07T10:13:46.668 INFO:teuthology.orchestra.run.vm01.stdout:cephadm ['--image', 'harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5', '-v', 'bootstrap', '--fsid', '3fd6e214-1a0e-11f1-b256-99cfc35f3328', '--config', '/home/ubuntu/cephtest/seed.ceph.conf', '--output-config', '/etc/ceph/ceph.conf', '--output-keyring', '/etc/ceph/ceph.client.admin.keyring', '--output-pub-ssh-key', '/home/ubuntu/cephtest/ceph.pub', '--mon-id', 'a', '--mgr-id', 'a', '--orphan-initial-daemons', '--skip-monitoring-stack', '--mon-ip', '192.168.123.101', '--skip-admin-label'] 2026-03-07T10:13:46.668 INFO:teuthology.orchestra.run.vm01.stderr:Specifying an fsid for your cluster offers no advantages and may increase the likelihood of fsid conflicts. 2026-03-07T10:13:46.669 INFO:teuthology.orchestra.run.vm01.stdout:Verifying podman|docker is present... 2026-03-07T10:13:46.688 INFO:teuthology.orchestra.run.vm01.stdout:/bin/podman: stdout 5.8.0 2026-03-07T10:13:46.688 INFO:teuthology.orchestra.run.vm01.stdout:Verifying lvm2 is present... 2026-03-07T10:13:46.688 INFO:teuthology.orchestra.run.vm01.stdout:Verifying time synchronization is in place... 2026-03-07T10:13:46.695 INFO:teuthology.orchestra.run.vm01.stdout:Non-zero exit code 1 from systemctl is-enabled chrony.service 2026-03-07T10:13:46.695 INFO:teuthology.orchestra.run.vm01.stdout:systemctl: stderr Failed to get unit file state for chrony.service: No such file or directory 2026-03-07T10:13:46.701 INFO:teuthology.orchestra.run.vm01.stdout:Non-zero exit code 3 from systemctl is-active chrony.service 2026-03-07T10:13:46.701 INFO:teuthology.orchestra.run.vm01.stdout:systemctl: stdout inactive 2026-03-07T10:13:46.709 INFO:teuthology.orchestra.run.vm01.stdout:systemctl: stdout enabled 2026-03-07T10:13:46.714 INFO:teuthology.orchestra.run.vm01.stdout:systemctl: stdout active 2026-03-07T10:13:46.714 INFO:teuthology.orchestra.run.vm01.stdout:Unit chronyd.service is enabled and running 2026-03-07T10:13:46.714 INFO:teuthology.orchestra.run.vm01.stdout:Repeating the final host check... 
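For readability, the bootstrap invocation buried in the line above, restated one flag per line; everything here is copied from the logged command, nothing added:

    sudo /home/ubuntu/cephtest/cephadm \
      --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 \
      -v bootstrap \
      --fsid 3fd6e214-1a0e-11f1-b256-99cfc35f3328 \
      --config /home/ubuntu/cephtest/seed.ceph.conf \
      --output-config /etc/ceph/ceph.conf \
      --output-keyring /etc/ceph/ceph.client.admin.keyring \
      --output-pub-ssh-key /home/ubuntu/cephtest/ceph.pub \
      --mon-id a --mgr-id a \
      --orphan-initial-daemons --skip-monitoring-stack \
      --mon-ip 192.168.123.101 --skip-admin-label \
      && sudo chmod +r /etc/ceph/ceph.client.admin.keyring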
2026-03-07T10:13:46.733 INFO:teuthology.orchestra.run.vm01.stdout:/bin/podman: stdout 5.8.0 2026-03-07T10:13:46.733 INFO:teuthology.orchestra.run.vm01.stdout:podman (/bin/podman) version 5.8.0 is present 2026-03-07T10:13:46.733 INFO:teuthology.orchestra.run.vm01.stdout:systemctl is present 2026-03-07T10:13:46.733 INFO:teuthology.orchestra.run.vm01.stdout:lvcreate is present 2026-03-07T10:13:46.739 INFO:teuthology.orchestra.run.vm01.stdout:Non-zero exit code 1 from systemctl is-enabled chrony.service 2026-03-07T10:13:46.739 INFO:teuthology.orchestra.run.vm01.stdout:systemctl: stderr Failed to get unit file state for chrony.service: No such file or directory 2026-03-07T10:13:46.743 INFO:teuthology.orchestra.run.vm01.stdout:Non-zero exit code 3 from systemctl is-active chrony.service 2026-03-07T10:13:46.743 INFO:teuthology.orchestra.run.vm01.stdout:systemctl: stdout inactive 2026-03-07T10:13:46.750 INFO:teuthology.orchestra.run.vm01.stdout:systemctl: stdout enabled 2026-03-07T10:13:46.755 INFO:teuthology.orchestra.run.vm01.stdout:systemctl: stdout active 2026-03-07T10:13:46.755 INFO:teuthology.orchestra.run.vm01.stdout:Unit chronyd.service is enabled and running 2026-03-07T10:13:46.755 INFO:teuthology.orchestra.run.vm01.stdout:Host looks OK 2026-03-07T10:13:46.755 INFO:teuthology.orchestra.run.vm01.stdout:Cluster fsid: 3fd6e214-1a0e-11f1-b256-99cfc35f3328 2026-03-07T10:13:46.755 INFO:teuthology.orchestra.run.vm01.stdout:Acquiring lock 139866651457664 on /run/cephadm/3fd6e214-1a0e-11f1-b256-99cfc35f3328.lock 2026-03-07T10:13:46.755 INFO:teuthology.orchestra.run.vm01.stdout:Lock 139866651457664 acquired on /run/cephadm/3fd6e214-1a0e-11f1-b256-99cfc35f3328.lock 2026-03-07T10:13:46.755 INFO:teuthology.orchestra.run.vm01.stdout:Verifying IP 192.168.123.101 port 3300 ... 2026-03-07T10:13:46.756 INFO:teuthology.orchestra.run.vm01.stdout:Verifying IP 192.168.123.101 port 6789 ... 
2026-03-07T10:13:46.756 INFO:teuthology.orchestra.run.vm01.stdout:Base mon IP(s) is [192.168.123.101:3300, 192.168.123.101:6789], mon addrv is [v2:192.168.123.101:3300,v1:192.168.123.101:6789] 2026-03-07T10:13:46.759 INFO:teuthology.orchestra.run.vm01.stdout:/sbin/ip: stdout default via 192.168.123.1 dev eth0 proto dhcp src 192.168.123.101 metric 100 2026-03-07T10:13:46.759 INFO:teuthology.orchestra.run.vm01.stdout:/sbin/ip: stdout 192.168.123.0/24 dev eth0 proto kernel scope link src 192.168.123.101 metric 100 2026-03-07T10:13:46.761 INFO:teuthology.orchestra.run.vm01.stdout:/sbin/ip: stdout ::1 dev lo proto kernel metric 256 pref medium 2026-03-07T10:13:46.761 INFO:teuthology.orchestra.run.vm01.stdout:/sbin/ip: stdout fe80::/64 dev eth0 proto kernel metric 1024 pref medium 2026-03-07T10:13:46.763 INFO:teuthology.orchestra.run.vm01.stdout:/sbin/ip: stdout 1: lo: mtu 65536 state UNKNOWN qlen 1000 2026-03-07T10:13:46.763 INFO:teuthology.orchestra.run.vm01.stdout:/sbin/ip: stdout inet6 ::1/128 scope host 2026-03-07T10:13:46.763 INFO:teuthology.orchestra.run.vm01.stdout:/sbin/ip: stdout valid_lft forever preferred_lft forever 2026-03-07T10:13:46.763 INFO:teuthology.orchestra.run.vm01.stdout:/sbin/ip: stdout 2: eth0: mtu 1500 state UP qlen 1000 2026-03-07T10:13:46.763 INFO:teuthology.orchestra.run.vm01.stdout:/sbin/ip: stdout inet6 fe80::5055:ff:fe00:1/64 scope link noprefixroute 2026-03-07T10:13:46.763 INFO:teuthology.orchestra.run.vm01.stdout:/sbin/ip: stdout valid_lft forever preferred_lft forever 2026-03-07T10:13:46.764 INFO:teuthology.orchestra.run.vm01.stdout:Mon IP `192.168.123.101` is in CIDR network `192.168.123.0/24` 2026-03-07T10:13:46.764 INFO:teuthology.orchestra.run.vm01.stdout:Mon IP `192.168.123.101` is in CIDR network `192.168.123.0/24` 2026-03-07T10:13:46.764 INFO:teuthology.orchestra.run.vm01.stdout:Inferred mon public CIDR from local network configuration ['192.168.123.0/24', '192.168.123.0/24'] 2026-03-07T10:13:46.764 INFO:teuthology.orchestra.run.vm01.stdout:Internal network (--cluster-network) has not been provided, OSD replication will default to the public_network 2026-03-07T10:13:46.765 INFO:teuthology.orchestra.run.vm01.stdout:Pulling container image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5... 2026-03-07T10:13:47.566 INFO:teuthology.orchestra.run.vm01.stdout:/bin/podman: stdout 8bccc98d839aa18345ec1336292d0452ca331737e49f12524f635044dcabcfe1 2026-03-07T10:13:47.566 INFO:teuthology.orchestra.run.vm01.stdout:/bin/podman: stderr Trying to pull harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5... 
2026-03-07T10:13:47.566 INFO:teuthology.orchestra.run.vm01.stdout:/bin/podman: stderr Getting image source signatures 2026-03-07T10:13:47.567 INFO:teuthology.orchestra.run.vm01.stdout:/bin/podman: stderr Copying blob sha256:89f108f95c9b33ae21c5514f17c1bd5ca646e21d3c5e8ac1e117cf65bcd40261 2026-03-07T10:13:47.567 INFO:teuthology.orchestra.run.vm01.stdout:/bin/podman: stderr Copying config sha256:8bccc98d839aa18345ec1336292d0452ca331737e49f12524f635044dcabcfe1 2026-03-07T10:13:47.567 INFO:teuthology.orchestra.run.vm01.stdout:/bin/podman: stderr Writing manifest to image destination 2026-03-07T10:13:47.811 INFO:teuthology.orchestra.run.vm01.stdout:ceph: stdout ceph version 19.2.3-39-g340d3c24fc6 (340d3c24fc6ae7529322dc7ccee6c6cb2589da0a) squid (stable) 2026-03-07T10:13:47.811 INFO:teuthology.orchestra.run.vm01.stdout:Ceph version: ceph version 19.2.3-39-g340d3c24fc6 (340d3c24fc6ae7529322dc7ccee6c6cb2589da0a) squid (stable) 2026-03-07T10:13:47.811 INFO:teuthology.orchestra.run.vm01.stdout:Extracting ceph user uid/gid from container image... 2026-03-07T10:13:47.876 INFO:teuthology.orchestra.run.vm01.stdout:stat: stdout 167 167 2026-03-07T10:13:47.876 INFO:teuthology.orchestra.run.vm01.stdout:Creating initial keys... 2026-03-07T10:13:47.970 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-authtool: stdout AQBb+qtpd5GwOBAAsR8+0miUsHUqmFqMiIN0Hg== 2026-03-07T10:13:48.069 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-authtool: stdout AQBc+qtpFEHeAhAAtcnyMHQUMy+u3X/xO+dEwg== 2026-03-07T10:13:48.172 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph-authtool: stdout AQBc+qtpg0Y8CBAAffjcBIqw8I9cmZl2GvqW8A== 2026-03-07T10:13:48.172 INFO:teuthology.orchestra.run.vm01.stdout:Creating initial monmap... 2026-03-07T10:13:48.275 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/monmaptool: stdout /usr/bin/monmaptool: monmap file /tmp/monmap 2026-03-07T10:13:48.275 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/monmaptool: stdout setting min_mon_release = quincy 2026-03-07T10:13:48.275 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/monmaptool: stdout /usr/bin/monmaptool: set fsid to 3fd6e214-1a0e-11f1-b256-99cfc35f3328 2026-03-07T10:13:48.275 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/monmaptool: stdout /usr/bin/monmaptool: writing epoch 0 to /tmp/monmap (1 monitors) 2026-03-07T10:13:48.275 INFO:teuthology.orchestra.run.vm01.stdout:monmaptool for a [v2:192.168.123.101:3300,v1:192.168.123.101:6789] on /usr/bin/monmaptool: monmap file /tmp/monmap 2026-03-07T10:13:48.275 INFO:teuthology.orchestra.run.vm01.stdout:setting min_mon_release = quincy 2026-03-07T10:13:48.275 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/monmaptool: set fsid to 3fd6e214-1a0e-11f1-b256-99cfc35f3328 2026-03-07T10:13:48.275 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/monmaptool: writing epoch 0 to /tmp/monmap (1 monitors) 2026-03-07T10:13:48.275 INFO:teuthology.orchestra.run.vm01.stdout: 2026-03-07T10:13:48.275 INFO:teuthology.orchestra.run.vm01.stdout:Creating mon... 2026-03-07T10:13:48.374 INFO:teuthology.orchestra.run.vm01.stdout:create mon.a on 2026-03-07T10:13:48.637 INFO:teuthology.orchestra.run.vm01.stdout:systemctl: stderr Created symlink /etc/systemd/system/multi-user.target.wants/ceph.target → /etc/systemd/system/ceph.target. 
2026-03-07T10:13:48.748 INFO:teuthology.orchestra.run.vm01.stdout:systemctl: stderr Created symlink /etc/systemd/system/multi-user.target.wants/ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328.target → /etc/systemd/system/ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328.target. 2026-03-07T10:13:48.749 INFO:teuthology.orchestra.run.vm01.stdout:systemctl: stderr Created symlink /etc/systemd/system/ceph.target.wants/ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328.target → /etc/systemd/system/ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328.target. 2026-03-07T10:13:48.884 INFO:teuthology.orchestra.run.vm01.stdout:Non-zero exit code 1 from systemctl reset-failed ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328@mon.a 2026-03-07T10:13:48.884 INFO:teuthology.orchestra.run.vm01.stdout:systemctl: stderr Failed to reset failed state of unit ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328@mon.a.service: Unit ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328@mon.a.service not loaded. 2026-03-07T10:13:49.014 INFO:teuthology.orchestra.run.vm01.stdout:systemctl: stderr Created symlink /etc/systemd/system/ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328.target.wants/ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328@mon.a.service → /etc/systemd/system/ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328@.service. 2026-03-07T10:13:49.180 INFO:teuthology.orchestra.run.vm01.stdout:firewalld does not appear to be present 2026-03-07T10:13:49.180 INFO:teuthology.orchestra.run.vm01.stdout:Not possible to enable service . firewalld.service is not available 2026-03-07T10:13:49.180 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for mon to start... 2026-03-07T10:13:49.180 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for mon... 2026-03-07T10:13:49.478 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout cluster: 2026-03-07T10:13:49.478 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout id: 3fd6e214-1a0e-11f1-b256-99cfc35f3328 2026-03-07T10:13:49.478 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout health: HEALTH_OK 2026-03-07T10:13:49.478 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout 2026-03-07T10:13:49.478 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout services: 2026-03-07T10:13:49.478 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout mon: 1 daemons, quorum a (age 0.227585s) 2026-03-07T10:13:49.478 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout mgr: no daemons active 2026-03-07T10:13:49.478 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout osd: 0 osds: 0 up, 0 in 2026-03-07T10:13:49.478 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout 2026-03-07T10:13:49.478 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout data: 2026-03-07T10:13:49.479 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout pools: 0 pools, 0 pgs 2026-03-07T10:13:49.479 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout objects: 0 objects, 0 B 2026-03-07T10:13:49.479 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout usage: 0 B used, 0 B / 0 B avail 2026-03-07T10:13:49.479 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout pgs: 2026-03-07T10:13:49.479 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout 2026-03-07T10:13:49.479 INFO:teuthology.orchestra.run.vm01.stdout:mon is available 2026-03-07T10:13:49.479 INFO:teuthology.orchestra.run.vm01.stdout:Assimilating anything we can from ceph.conf... 
2026-03-07T10:13:49.745 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout 2026-03-07T10:13:49.745 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout [global] 2026-03-07T10:13:49.745 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout fsid = 3fd6e214-1a0e-11f1-b256-99cfc35f3328 2026-03-07T10:13:49.745 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout mon_cluster_log_file_level = debug 2026-03-07T10:13:49.745 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout mon_host = [v2:192.168.123.101:3300,v1:192.168.123.101:6789] 2026-03-07T10:13:49.745 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout mon_osd_allow_pg_remap = true 2026-03-07T10:13:49.745 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout mon_osd_allow_primary_affinity = true 2026-03-07T10:13:49.745 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout mon_warn_on_no_sortbitwise = false 2026-03-07T10:13:49.745 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout osd_crush_chooseleaf_type = 0 2026-03-07T10:13:49.745 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout 2026-03-07T10:13:49.745 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout [mgr] 2026-03-07T10:13:49.745 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout mgr/cephadm/use_agent = True 2026-03-07T10:13:49.745 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout mgr/telemetry/nag = false 2026-03-07T10:13:49.745 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout 2026-03-07T10:13:49.745 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout [osd] 2026-03-07T10:13:49.745 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout osd_map_max_advance = 10 2026-03-07T10:13:49.745 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout osd_sloppy_crc = true 2026-03-07T10:13:49.745 INFO:teuthology.orchestra.run.vm01.stdout:Generating new minimal ceph.conf... 2026-03-07T10:13:50.042 INFO:teuthology.orchestra.run.vm01.stdout:Restarting the monitor... 2026-03-07T10:13:50.115 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 systemd[1]: Stopping Ceph mon.a for 3fd6e214-1a0e-11f1-b256-99cfc35f3328... 
2026-03-07T10:13:50.368 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mon-a[49312]: 2026-03-07T10:13:50.113+0000 7f33fc41d640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-mon -n mon.a -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false (PID: 1) UID: 0 2026-03-07T10:13:50.368 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mon-a[49312]: 2026-03-07T10:13:50.113+0000 7f33fc41d640 -1 mon.a@0(leader) e1 *** Got Signal Terminated *** 2026-03-07T10:13:50.368 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 podman[49516]: 2026-03-07 10:13:50.359269075 +0000 UTC m=+0.259433242 container died 239cd7d3c72c996af6d4be013c12e0ccb1c82706eac77f1306f59bd5854baeb8 (image=harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5, name=ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mon-a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a) 2026-03-07T10:13:50.550 INFO:teuthology.orchestra.run.vm01.stdout:Setting public_network to 192.168.123.0/24 in mon config section 2026-03-07T10:13:50.627 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 podman[49516]: 2026-03-07 10:13:50.374773914 +0000 UTC m=+0.274938081 container remove 239cd7d3c72c996af6d4be013c12e0ccb1c82706eac77f1306f59bd5854baeb8 (image=harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5, name=ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mon-a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a) 2026-03-07T10:13:50.627 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 bash[49516]: ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mon-a 2026-03-07T10:13:50.627 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 systemd[1]: ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328@mon.a.service: Deactivated successfully. 2026-03-07T10:13:50.627 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 systemd[1]: Stopped Ceph mon.a for 3fd6e214-1a0e-11f1-b256-99cfc35f3328. 2026-03-07T10:13:50.627 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 systemd[1]: Starting Ceph mon.a for 3fd6e214-1a0e-11f1-b256-99cfc35f3328... 
2026-03-07T10:13:50.627 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 podman[49588]: 2026-03-07 10:13:50.508468756 +0000 UTC m=+0.015572034 container create 8e32f13a6599b3f5c501b3a839e2606ca98096aa4c3b590f2925970a9c2e0594 (image=harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5, name=ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mon-a, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default) 2026-03-07T10:13:50.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 podman[49588]: 2026-03-07 10:13:50.540059247 +0000 UTC m=+0.047162536 container init 8e32f13a6599b3f5c501b3a839e2606ca98096aa4c3b590f2925970a9c2e0594 (image=harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5, name=ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mon-a, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/) 2026-03-07T10:13:50.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 podman[49588]: 2026-03-07 10:13:50.543094297 +0000 UTC m=+0.050197586 container start 8e32f13a6599b3f5c501b3a839e2606ca98096aa4c3b590f2925970a9c2e0594 (image=harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5, name=ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mon-a, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6) 2026-03-07T10:13:50.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 bash[49588]: 8e32f13a6599b3f5c501b3a839e2606ca98096aa4c3b590f2925970a9c2e0594 2026-03-07T10:13:50.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 podman[49588]: 2026-03-07 10:13:50.501903548 +0000 UTC m=+0.009006837 image pull 8bccc98d839aa18345ec1336292d0452ca331737e49f12524f635044dcabcfe1 harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 2026-03-07T10:13:50.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 systemd[1]: Started Ceph mon.a for 3fd6e214-1a0e-11f1-b256-99cfc35f3328. 
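With mon.a restarted under systemd, the public_network value cephadm just applied can be spot-checked by hand. A hedged sketch reusing the fsid and unit name from the log; running these commands is illustrative only and not part of the test:

    FSID=3fd6e214-1a0e-11f1-b256-99cfc35f3328

    sudo systemctl is-active "ceph-${FSID}@mon.a.service"      # expect: active
    sudo /home/ubuntu/cephtest/cephadm shell --fsid "$FSID" -- \
        ceph config get mon public_network                     # expect: 192.168.123.0/24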
2026-03-07T10:13:50.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: set uid:gid to 167:167 (ceph:ceph) 2026-03-07T10:13:50.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: ceph version 19.2.3-39-g340d3c24fc6 (340d3c24fc6ae7529322dc7ccee6c6cb2589da0a) squid (stable), process ceph-mon, pid 2 2026-03-07T10:13:50.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: pidfile_write: ignore empty --pid-file 2026-03-07T10:13:50.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: load: jerasure load: lrc 2026-03-07T10:13:50.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: RocksDB version: 7.9.2 2026-03-07T10:13:50.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Git sha 0 2026-03-07T10:13:50.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Compile date 2026-03-06 13:52:12 2026-03-07T10:13:50.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: DB SUMMARY 2026-03-07T10:13:50.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: DB Session ID: P8EV0ZZK1EBXUQY0OLF0 2026-03-07T10:13:50.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: CURRENT file: CURRENT 2026-03-07T10:13:50.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: IDENTITY file: IDENTITY 2026-03-07T10:13:50.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: MANIFEST file: MANIFEST-000010 size: 179 Bytes 2026-03-07T10:13:50.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: SST files in /var/lib/ceph/mon/ceph-a/store.db dir, Total Num: 1, files: 000008.sst 2026-03-07T10:13:50.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Write Ahead Log file in /var/lib/ceph/mon/ceph-a/store.db: 000009.log size: 75679 ; 2026-03-07T10:13:50.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.error_if_exists: 0 2026-03-07T10:13:50.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.create_if_missing: 0 2026-03-07T10:13:50.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.paranoid_checks: 1 2026-03-07T10:13:50.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.flush_verify_memtable_count: 1 2026-03-07T10:13:50.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.track_and_verify_wals_in_manifest: 0 2026-03-07T10:13:50.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.verify_sst_unique_id_in_manifest: 1 2026-03-07T10:13:50.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.env: 0x55aab67f3ca0 2026-03-07T10:13:50.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.fs: PosixFileSystem 2026-03-07T10:13:50.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.info_log: 0x55aab8372320 2026-03-07T10:13:50.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.max_file_opening_threads: 16 
2026-03-07T10:13:50.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.statistics: (nil) 2026-03-07T10:13:50.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.use_fsync: 0 2026-03-07T10:13:50.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.max_log_file_size: 0 2026-03-07T10:13:50.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.max_manifest_file_size: 1073741824 2026-03-07T10:13:50.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.log_file_time_to_roll: 0 2026-03-07T10:13:50.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.keep_log_file_num: 1000 2026-03-07T10:13:50.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.recycle_log_file_num: 0 2026-03-07T10:13:50.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.allow_fallocate: 1 2026-03-07T10:13:50.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.allow_mmap_reads: 0 2026-03-07T10:13:50.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.allow_mmap_writes: 0 2026-03-07T10:13:50.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.use_direct_reads: 0 2026-03-07T10:13:50.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.use_direct_io_for_flush_and_compaction: 0 2026-03-07T10:13:50.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.create_missing_column_families: 0 2026-03-07T10:13:50.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.db_log_dir: 2026-03-07T10:13:50.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.wal_dir: 2026-03-07T10:13:50.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.table_cache_numshardbits: 6 2026-03-07T10:13:50.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.WAL_ttl_seconds: 0 2026-03-07T10:13:50.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.WAL_size_limit_MB: 0 2026-03-07T10:13:50.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.max_write_batch_group_size_bytes: 1048576 2026-03-07T10:13:50.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.manifest_preallocation_size: 4194304 2026-03-07T10:13:50.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.is_fd_close_on_exec: 1 2026-03-07T10:13:50.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.advise_random_on_open: 1 2026-03-07T10:13:50.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.db_write_buffer_size: 0 2026-03-07T10:13:50.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.write_buffer_manager: 0x55aab8377900 2026-03-07T10:13:50.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: 
Options.access_hint_on_compaction_start: 1 2026-03-07T10:13:50.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.random_access_max_buffer_size: 1048576 2026-03-07T10:13:50.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.use_adaptive_mutex: 0 2026-03-07T10:13:50.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.rate_limiter: (nil) 2026-03-07T10:13:50.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.sst_file_manager.rate_bytes_per_sec: 0 2026-03-07T10:13:50.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.wal_recovery_mode: 2 2026-03-07T10:13:50.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.enable_thread_tracking: 0 2026-03-07T10:13:50.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.enable_pipelined_write: 0 2026-03-07T10:13:50.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.unordered_write: 0 2026-03-07T10:13:50.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.allow_concurrent_memtable_write: 1 2026-03-07T10:13:50.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.enable_write_thread_adaptive_yield: 1 2026-03-07T10:13:50.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.write_thread_max_yield_usec: 100 2026-03-07T10:13:50.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.write_thread_slow_yield_usec: 3 2026-03-07T10:13:50.628 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.row_cache: None 2026-03-07T10:13:50.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.wal_filter: None 2026-03-07T10:13:50.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.avoid_flush_during_recovery: 0 2026-03-07T10:13:50.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.allow_ingest_behind: 0 2026-03-07T10:13:50.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.two_write_queues: 0 2026-03-07T10:13:50.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.manual_wal_flush: 0 2026-03-07T10:13:50.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.wal_compression: 0 2026-03-07T10:13:50.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.atomic_flush: 0 2026-03-07T10:13:50.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.avoid_unnecessary_blocking_io: 0 2026-03-07T10:13:50.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.persist_stats_to_disk: 0 2026-03-07T10:13:50.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.write_dbid_to_manifest: 0 2026-03-07T10:13:50.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.log_readahead_size: 0 2026-03-07T10:13:50.629 
INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.file_checksum_gen_factory: Unknown 2026-03-07T10:13:50.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.best_efforts_recovery: 0 2026-03-07T10:13:50.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.max_bgerror_resume_count: 2147483647 2026-03-07T10:13:50.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.bgerror_resume_retry_interval: 1000000 2026-03-07T10:13:50.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.allow_data_in_errors: 0 2026-03-07T10:13:50.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.db_host_id: __hostname__ 2026-03-07T10:13:50.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.enforce_single_del_contracts: true 2026-03-07T10:13:50.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.max_background_jobs: 2 2026-03-07T10:13:50.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.max_background_compactions: -1 2026-03-07T10:13:50.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.max_subcompactions: 1 2026-03-07T10:13:50.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.avoid_flush_during_shutdown: 0 2026-03-07T10:13:50.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.writable_file_max_buffer_size: 1048576 2026-03-07T10:13:50.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.delayed_write_rate : 16777216 2026-03-07T10:13:50.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.max_total_wal_size: 0 2026-03-07T10:13:50.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.delete_obsolete_files_period_micros: 21600000000 2026-03-07T10:13:50.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.stats_dump_period_sec: 600 2026-03-07T10:13:50.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.stats_persist_period_sec: 600 2026-03-07T10:13:50.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.stats_history_buffer_size: 1048576 2026-03-07T10:13:50.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.max_open_files: -1 2026-03-07T10:13:50.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.bytes_per_sync: 0 2026-03-07T10:13:50.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.wal_bytes_per_sync: 0 2026-03-07T10:13:50.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.strict_bytes_per_sync: 0 2026-03-07T10:13:50.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.compaction_readahead_size: 0 2026-03-07T10:13:50.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.max_background_flushes: -1 2026-03-07T10:13:50.629 
INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Compression algorithms supported: 2026-03-07T10:13:50.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: kZSTD supported: 0 2026-03-07T10:13:50.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: kXpressCompression supported: 0 2026-03-07T10:13:50.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: kBZip2Compression supported: 0 2026-03-07T10:13:50.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: kZSTDNotFinalCompression supported: 0 2026-03-07T10:13:50.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: kLZ4Compression supported: 1 2026-03-07T10:13:50.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: kZlibCompression supported: 1 2026-03-07T10:13:50.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: kLZ4HCCompression supported: 1 2026-03-07T10:13:50.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: kSnappyCompression supported: 1 2026-03-07T10:13:50.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Fast CRC32 supported: Supported on x86 2026-03-07T10:13:50.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: DMutex implementation: pthread_mutex_t 2026-03-07T10:13:50.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: [db/version_set.cc:5527] Recovering from manifest file: /var/lib/ceph/mon/ceph-a/store.db/MANIFEST-000010 2026-03-07T10:13:50.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [default]: 2026-03-07T10:13:50.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.comparator: leveldb.BytewiseComparator 2026-03-07T10:13:50.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.merge_operator: 2026-03-07T10:13:50.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.compaction_filter: None 2026-03-07T10:13:50.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.compaction_filter_factory: None 2026-03-07T10:13:50.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.sst_partitioner_factory: None 2026-03-07T10:13:50.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.memtable_factory: SkipListFactory 2026-03-07T10:13:50.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.table_factory: BlockBasedTable 2026-03-07T10:13:50.629 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: table_factory options: flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x55aab83723e0) 2026-03-07T10:13:50.629 INFO:journalctl@ceph.mon.a.vm01.stdout: cache_index_and_filter_blocks: 1 2026-03-07T10:13:50.629 INFO:journalctl@ceph.mon.a.vm01.stdout: cache_index_and_filter_blocks_with_high_priority: 0 2026-03-07T10:13:50.629 INFO:journalctl@ceph.mon.a.vm01.stdout: pin_l0_filter_and_index_blocks_in_cache: 0 2026-03-07T10:13:50.629 
INFO:journalctl@ceph.mon.a.vm01.stdout: pin_top_level_index_and_filter: 1 2026-03-07T10:13:50.629 INFO:journalctl@ceph.mon.a.vm01.stdout: index_type: 0 2026-03-07T10:13:50.629 INFO:journalctl@ceph.mon.a.vm01.stdout: data_block_index_type: 0 2026-03-07T10:13:50.629 INFO:journalctl@ceph.mon.a.vm01.stdout: index_shortening: 1 2026-03-07T10:13:50.629 INFO:journalctl@ceph.mon.a.vm01.stdout: data_block_hash_table_util_ratio: 0.750000 2026-03-07T10:13:50.629 INFO:journalctl@ceph.mon.a.vm01.stdout: checksum: 4 2026-03-07T10:13:50.629 INFO:journalctl@ceph.mon.a.vm01.stdout: no_block_cache: 0 2026-03-07T10:13:50.629 INFO:journalctl@ceph.mon.a.vm01.stdout: block_cache: 0x55aab83971f0 2026-03-07T10:13:50.630 INFO:journalctl@ceph.mon.a.vm01.stdout: block_cache_name: BinnedLRUCache 2026-03-07T10:13:50.630 INFO:journalctl@ceph.mon.a.vm01.stdout: block_cache_options: 2026-03-07T10:13:50.630 INFO:journalctl@ceph.mon.a.vm01.stdout: capacity : 536870912 2026-03-07T10:13:50.630 INFO:journalctl@ceph.mon.a.vm01.stdout: num_shard_bits : 4 2026-03-07T10:13:50.630 INFO:journalctl@ceph.mon.a.vm01.stdout: strict_capacity_limit : 0 2026-03-07T10:13:50.630 INFO:journalctl@ceph.mon.a.vm01.stdout: high_pri_pool_ratio: 0.000 2026-03-07T10:13:50.630 INFO:journalctl@ceph.mon.a.vm01.stdout: block_cache_compressed: (nil) 2026-03-07T10:13:50.630 INFO:journalctl@ceph.mon.a.vm01.stdout: persistent_cache: (nil) 2026-03-07T10:13:50.630 INFO:journalctl@ceph.mon.a.vm01.stdout: block_size: 4096 2026-03-07T10:13:50.630 INFO:journalctl@ceph.mon.a.vm01.stdout: block_size_deviation: 10 2026-03-07T10:13:50.630 INFO:journalctl@ceph.mon.a.vm01.stdout: block_restart_interval: 16 2026-03-07T10:13:50.630 INFO:journalctl@ceph.mon.a.vm01.stdout: index_block_restart_interval: 1 2026-03-07T10:13:50.630 INFO:journalctl@ceph.mon.a.vm01.stdout: metadata_block_size: 4096 2026-03-07T10:13:50.630 INFO:journalctl@ceph.mon.a.vm01.stdout: partition_filters: 0 2026-03-07T10:13:50.630 INFO:journalctl@ceph.mon.a.vm01.stdout: use_delta_encoding: 1 2026-03-07T10:13:50.630 INFO:journalctl@ceph.mon.a.vm01.stdout: filter_policy: bloomfilter 2026-03-07T10:13:50.630 INFO:journalctl@ceph.mon.a.vm01.stdout: whole_key_filtering: 1 2026-03-07T10:13:50.630 INFO:journalctl@ceph.mon.a.vm01.stdout: verify_compression: 0 2026-03-07T10:13:50.630 INFO:journalctl@ceph.mon.a.vm01.stdout: read_amp_bytes_per_bit: 0 2026-03-07T10:13:50.630 INFO:journalctl@ceph.mon.a.vm01.stdout: format_version: 5 2026-03-07T10:13:50.630 INFO:journalctl@ceph.mon.a.vm01.stdout: enable_index_compression: 1 2026-03-07T10:13:50.630 INFO:journalctl@ceph.mon.a.vm01.stdout: block_align: 0 2026-03-07T10:13:50.630 INFO:journalctl@ceph.mon.a.vm01.stdout: max_auto_readahead_size: 262144 2026-03-07T10:13:50.630 INFO:journalctl@ceph.mon.a.vm01.stdout: prepopulate_block_cache: 0 2026-03-07T10:13:50.630 INFO:journalctl@ceph.mon.a.vm01.stdout: initial_auto_readahead_size: 8192 2026-03-07T10:13:50.630 INFO:journalctl@ceph.mon.a.vm01.stdout: num_file_reads_for_auto_readahead: 2 2026-03-07T10:13:50.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.write_buffer_size: 33554432 2026-03-07T10:13:50.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.max_write_buffer_number: 2 2026-03-07T10:13:50.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.compression: NoCompression 2026-03-07T10:13:50.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: 
Options.bottommost_compression: Disabled 2026-03-07T10:13:50.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.prefix_extractor: nullptr 2026-03-07T10:13:50.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.memtable_insert_with_hint_prefix_extractor: nullptr 2026-03-07T10:13:50.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.num_levels: 7 2026-03-07T10:13:50.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.min_write_buffer_number_to_merge: 1 2026-03-07T10:13:50.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.max_write_buffer_number_to_maintain: 0 2026-03-07T10:13:50.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.max_write_buffer_size_to_maintain: 0 2026-03-07T10:13:50.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.bottommost_compression_opts.window_bits: -14 2026-03-07T10:13:50.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.bottommost_compression_opts.level: 32767 2026-03-07T10:13:50.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.bottommost_compression_opts.strategy: 0 2026-03-07T10:13:50.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.bottommost_compression_opts.max_dict_bytes: 0 2026-03-07T10:13:50.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.bottommost_compression_opts.zstd_max_train_bytes: 0 2026-03-07T10:13:50.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.bottommost_compression_opts.parallel_threads: 1 2026-03-07T10:13:50.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.bottommost_compression_opts.enabled: false 2026-03-07T10:13:50.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.bottommost_compression_opts.max_dict_buffer_bytes: 0 2026-03-07T10:13:50.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.bottommost_compression_opts.use_zstd_dict_trainer: true 2026-03-07T10:13:50.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.compression_opts.window_bits: -14 2026-03-07T10:13:50.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.compression_opts.level: 32767 2026-03-07T10:13:50.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.compression_opts.strategy: 0 2026-03-07T10:13:50.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.compression_opts.max_dict_bytes: 0 2026-03-07T10:13:50.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.compression_opts.zstd_max_train_bytes: 0 2026-03-07T10:13:50.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.compression_opts.use_zstd_dict_trainer: true 2026-03-07T10:13:50.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.compression_opts.parallel_threads: 1 2026-03-07T10:13:50.630 
INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.compression_opts.enabled: false 2026-03-07T10:13:50.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.compression_opts.max_dict_buffer_bytes: 0 2026-03-07T10:13:50.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.level0_file_num_compaction_trigger: 4 2026-03-07T10:13:50.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.level0_slowdown_writes_trigger: 20 2026-03-07T10:13:50.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.level0_stop_writes_trigger: 36 2026-03-07T10:13:50.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.target_file_size_base: 67108864 2026-03-07T10:13:50.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.target_file_size_multiplier: 1 2026-03-07T10:13:50.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.max_bytes_for_level_base: 268435456 2026-03-07T10:13:50.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.level_compaction_dynamic_level_bytes: 1 2026-03-07T10:13:50.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.max_bytes_for_level_multiplier: 10.000000 2026-03-07T10:13:50.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1 2026-03-07T10:13:50.630 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1 2026-03-07T10:13:50.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1 2026-03-07T10:13:50.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1 2026-03-07T10:13:50.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1 2026-03-07T10:13:50.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1 2026-03-07T10:13:50.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1 2026-03-07T10:13:50.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.max_sequential_skip_in_iterations: 8 2026-03-07T10:13:50.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.max_compaction_bytes: 1677721600 2026-03-07T10:13:50.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.ignore_max_compaction_bytes_for_input: true 2026-03-07T10:13:50.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.arena_block_size: 1048576 2026-03-07T10:13:50.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.soft_pending_compaction_bytes_limit: 68719476736 2026-03-07T10:13:50.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: 
Options.hard_pending_compaction_bytes_limit: 274877906944 2026-03-07T10:13:50.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.disable_auto_compactions: 0 2026-03-07T10:13:50.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.compaction_style: kCompactionStyleLevel 2026-03-07T10:13:50.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.compaction_pri: kMinOverlappingRatio 2026-03-07T10:13:50.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.compaction_options_universal.size_ratio: 1 2026-03-07T10:13:50.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.compaction_options_universal.min_merge_width: 2 2026-03-07T10:13:50.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295 2026-03-07T10:13:50.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200 2026-03-07T10:13:50.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1 2026-03-07T10:13:50.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize 2026-03-07T10:13:50.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824 2026-03-07T10:13:50.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0 2026-03-07T10:13:50.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0); 2026-03-07T10:13:50.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.inplace_update_support: 0 2026-03-07T10:13:50.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.inplace_update_num_locks: 10000 2026-03-07T10:13:50.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.memtable_prefix_bloom_size_ratio: 0.000000 2026-03-07T10:13:50.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.memtable_whole_key_filtering: 0 2026-03-07T10:13:50.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.memtable_huge_page_size: 0 2026-03-07T10:13:50.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.bloom_locality: 0 2026-03-07T10:13:50.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.max_successive_merges: 0 2026-03-07T10:13:50.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.optimize_filters_for_hits: 0 2026-03-07T10:13:50.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.paranoid_file_checks: 0 2026-03-07T10:13:50.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 
ceph-mon[49602]: rocksdb: Options.force_consistency_checks: 1 2026-03-07T10:13:50.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.report_bg_io_stats: 0 2026-03-07T10:13:50.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.ttl: 2592000 2026-03-07T10:13:50.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.periodic_compaction_seconds: 0 2026-03-07T10:13:50.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.preclude_last_level_data_seconds: 0 2026-03-07T10:13:50.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.preserve_internal_time_seconds: 0 2026-03-07T10:13:50.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.enable_blob_files: false 2026-03-07T10:13:50.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.min_blob_size: 0 2026-03-07T10:13:50.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.blob_file_size: 268435456 2026-03-07T10:13:50.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.blob_compression_type: NoCompression 2026-03-07T10:13:50.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.enable_blob_garbage_collection: false 2026-03-07T10:13:50.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.blob_garbage_collection_age_cutoff: 0.250000 2026-03-07T10:13:50.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000 2026-03-07T10:13:50.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.blob_compaction_readahead_size: 0 2026-03-07T10:13:50.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.blob_file_starting_level: 0 2026-03-07T10:13:50.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: Options.experimental_mempurge_threshold: 0.000000 2026-03-07T10:13:50.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: [db/version_set.cc:5566] Recovered from manifest file:/var/lib/ceph/mon/ceph-a/store.db/MANIFEST-000010 succeeded,manifest_file_number is 10, next_file_number is 12, last_sequence is 5, log_number is 5,prev_log_number is 0,max_column_family is 0,min_log_number_to_keep is 5 2026-03-07T10:13:50.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: [db/version_set.cc:5581] Column family [default] (ID 0), log number is 5 2026-03-07T10:13:50.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: [db/db_impl/db_impl_open.cc:539] DB ID: 0a28ef00-dc93-4b0f-a72f-2d0f2ed922ce 2026-03-07T10:13:50.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: EVENT_LOG_v1 {"time_micros": 1772878430565119, "job": 1, "event": "recovery_started", "wal_files": [9]} 2026-03-07T10:13:50.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: [db/db_impl/db_impl_open.cc:1043] Recovering log #9 mode 2 2026-03-07T10:13:50.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: 
rocksdb: EVENT_LOG_v1 {"time_micros": 1772878430566924, "cf_name": "default", "job": 1, "event": "table_file_creation", "file_number": 13, "file_size": 72760, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 8, "largest_seqno": 225, "table_properties": {"data_size": 71039, "index_size": 174, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 517, "raw_key_size": 9705, "raw_average_key_size": 49, "raw_value_size": 65518, "raw_average_value_size": 334, "num_data_blocks": 8, "num_entries": 196, "num_filter_entries": 196, "num_deletions": 3, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1772878430, "oldest_key_time": 0, "file_creation_time": 0, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "0a28ef00-dc93-4b0f-a72f-2d0f2ed922ce", "db_session_id": "P8EV0ZZK1EBXUQY0OLF0", "orig_file_number": 13, "seqno_to_time_mapping": "N/A"}} 2026-03-07T10:13:50.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: EVENT_LOG_v1 {"time_micros": 1772878430566974, "job": 1, "event": "recovery_finished"} 2026-03-07T10:13:50.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: [db/version_set.cc:5047] Creating manifest 15 2026-03-07T10:13:50.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-a/store.db/000009.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000 2026-03-07T10:13:50.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: [db/db_impl/db_impl_open.cc:1987] SstFileManager instance 0x55aab8398e00 2026-03-07T10:13:50.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: DB pointer 0x55aab84b0000 2026-03-07T10:13:50.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS ------- 2026-03-07T10:13:50.631 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: rocksdb: [db/db_impl/db_impl.cc:1111] 2026-03-07T10:13:50.631 INFO:journalctl@ceph.mon.a.vm01.stdout: ** DB Stats ** 2026-03-07T10:13:50.631 INFO:journalctl@ceph.mon.a.vm01.stdout: Uptime(secs): 0.0 total, 0.0 interval 2026-03-07T10:13:50.631 INFO:journalctl@ceph.mon.a.vm01.stdout: Cumulative writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 GB, 0.00 MB/s 2026-03-07T10:13:50.631 INFO:journalctl@ceph.mon.a.vm01.stdout: Cumulative WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s 2026-03-07T10:13:50.631 INFO:journalctl@ceph.mon.a.vm01.stdout: Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent 2026-03-07T10:13:50.631 INFO:journalctl@ceph.mon.a.vm01.stdout: Interval writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 MB, 0.00 MB/s 
2026-03-07T10:13:50.631 INFO:journalctl@ceph.mon.a.vm01.stdout: Interval WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s 2026-03-07T10:13:50.632 INFO:journalctl@ceph.mon.a.vm01.stdout: Interval stall: 00:00:0.000 H:M:S, 0.0 percent 2026-03-07T10:13:50.632 INFO:journalctl@ceph.mon.a.vm01.stdout: 2026-03-07T10:13:50.632 INFO:journalctl@ceph.mon.a.vm01.stdout: ** Compaction Stats [default] ** 2026-03-07T10:13:50.632 INFO:journalctl@ceph.mon.a.vm01.stdout: Level Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB) 2026-03-07T10:13:50.632 INFO:journalctl@ceph.mon.a.vm01.stdout: ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ 2026-03-07T10:13:50.632 INFO:journalctl@ceph.mon.a.vm01.stdout: L0 2/0 72.91 KB 0.5 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 53.6 0.00 0.00 1 0.001 0 0 0.0 0.0 2026-03-07T10:13:50.632 INFO:journalctl@ceph.mon.a.vm01.stdout: Sum 2/0 72.91 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 53.6 0.00 0.00 1 0.001 0 0 0.0 0.0 2026-03-07T10:13:50.632 INFO:journalctl@ceph.mon.a.vm01.stdout: Int 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 53.6 0.00 0.00 1 0.001 0 0 0.0 0.0 2026-03-07T10:13:50.632 INFO:journalctl@ceph.mon.a.vm01.stdout: 2026-03-07T10:13:50.632 INFO:journalctl@ceph.mon.a.vm01.stdout: ** Compaction Stats [default] ** 2026-03-07T10:13:50.632 INFO:journalctl@ceph.mon.a.vm01.stdout: Priority Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB) 2026-03-07T10:13:50.632 INFO:journalctl@ceph.mon.a.vm01.stdout: --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 2026-03-07T10:13:50.632 INFO:journalctl@ceph.mon.a.vm01.stdout: User 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 53.6 0.00 0.00 1 0.001 0 0 0.0 0.0 2026-03-07T10:13:50.632 INFO:journalctl@ceph.mon.a.vm01.stdout: 2026-03-07T10:13:50.632 INFO:journalctl@ceph.mon.a.vm01.stdout: Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0 2026-03-07T10:13:50.632 INFO:journalctl@ceph.mon.a.vm01.stdout: 2026-03-07T10:13:50.632 INFO:journalctl@ceph.mon.a.vm01.stdout: Uptime(secs): 0.0 total, 0.0 interval 2026-03-07T10:13:50.632 INFO:journalctl@ceph.mon.a.vm01.stdout: Flush(GB): cumulative 0.000, interval 0.000 2026-03-07T10:13:50.632 INFO:journalctl@ceph.mon.a.vm01.stdout: AddFile(GB): cumulative 0.000, interval 0.000 2026-03-07T10:13:50.632 INFO:journalctl@ceph.mon.a.vm01.stdout: AddFile(Total Files): cumulative 0, interval 0 2026-03-07T10:13:50.632 INFO:journalctl@ceph.mon.a.vm01.stdout: AddFile(L0 Files): cumulative 0, interval 0 2026-03-07T10:13:50.632 INFO:journalctl@ceph.mon.a.vm01.stdout: AddFile(Keys): cumulative 0, interval 0 2026-03-07T10:13:50.632 INFO:journalctl@ceph.mon.a.vm01.stdout: Cumulative compaction: 0.00 GB write, 15.01 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds 2026-03-07T10:13:50.632 INFO:journalctl@ceph.mon.a.vm01.stdout: Interval compaction: 0.00 GB write, 15.01 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds 2026-03-07T10:13:50.632 INFO:journalctl@ceph.mon.a.vm01.stdout: Stalls(count): 0 level0_slowdown, 0 
level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count 2026-03-07T10:13:50.632 INFO:journalctl@ceph.mon.a.vm01.stdout: Block cache BinnedLRUCache@0x55aab83971f0#2 capacity: 512.00 MB usage: 1.06 KB table_size: 0 occupancy: 18446744073709551615 collections: 1 last_copies: 0 last_secs: 5.3e-05 secs_since: 0 2026-03-07T10:13:50.632 INFO:journalctl@ceph.mon.a.vm01.stdout: Block cache entry stats(count,size,portion): FilterBlock(2,0.70 KB,0.00013411%) IndexBlock(2,0.36 KB,6.85453e-05%) Misc(1,0.00 KB,0%) 2026-03-07T10:13:50.632 INFO:journalctl@ceph.mon.a.vm01.stdout: 2026-03-07T10:13:50.632 INFO:journalctl@ceph.mon.a.vm01.stdout: ** File Read Latency Histogram By Level [default] ** 2026-03-07T10:13:50.632 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: starting mon.a rank 0 at public addrs [v2:192.168.123.101:3300/0,v1:192.168.123.101:6789/0] at bind addrs [v2:192.168.123.101:3300/0,v1:192.168.123.101:6789/0] mon_data /var/lib/ceph/mon/ceph-a fsid 3fd6e214-1a0e-11f1-b256-99cfc35f3328 2026-03-07T10:13:50.632 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: mon.a@-1(???) e1 preinit fsid 3fd6e214-1a0e-11f1-b256-99cfc35f3328 2026-03-07T10:13:50.632 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: mon.a@-1(???).mds e1 new map 2026-03-07T10:13:50.632 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: mon.a@-1(???).mds e1 print_map 2026-03-07T10:13:50.632 INFO:journalctl@ceph.mon.a.vm01.stdout: e1 2026-03-07T10:13:50.632 INFO:journalctl@ceph.mon.a.vm01.stdout: btime 2026-03-07T10:13:49:200453+0000 2026-03-07T10:13:50.632 INFO:journalctl@ceph.mon.a.vm01.stdout: enable_multiple, ever_enabled_multiple: 1,1 2026-03-07T10:13:50.632 INFO:journalctl@ceph.mon.a.vm01.stdout: default compat: compat={},rocompat={},incompat={1=base v0.20,2=client writeable ranges,3=default file layouts on dirs,4=dir inode in separate object,5=mds uses versioned encoding,6=dirfrag is stored in omap,8=no anchor table,9=file layout v2,10=snaprealm v2,11=minor log segments,12=quiesce subvolumes} 2026-03-07T10:13:50.632 INFO:journalctl@ceph.mon.a.vm01.stdout: legacy client fscid: -1 2026-03-07T10:13:50.632 INFO:journalctl@ceph.mon.a.vm01.stdout: 2026-03-07T10:13:50.632 INFO:journalctl@ceph.mon.a.vm01.stdout: No filesystems configured 2026-03-07T10:13:50.632 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: mon.a@-1(???).osd e1 crush map has features 3314932999778484224, adjusting msgr requires 2026-03-07T10:13:50.632 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: mon.a@-1(???).osd e1 crush map has features 288514050185494528, adjusting msgr requires 2026-03-07T10:13:50.632 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: mon.a@-1(???).osd e1 crush map has features 288514050185494528, adjusting msgr requires 2026-03-07T10:13:50.632 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: mon.a@-1(???).osd e1 crush map has features 288514050185494528, adjusting msgr requires 2026-03-07T10:13:50.632 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: mon.a@-1(???).paxosservice(auth 1..2) refresh upgraded, format 0 -> 3 2026-03-07T10:13:50.632 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: mon.a is 
new leader, mons a in quorum (ranks 0) 2026-03-07T10:13:50.830 INFO:teuthology.orchestra.run.vm01.stdout:Wrote config to /etc/ceph/ceph.conf 2026-03-07T10:13:50.832 INFO:teuthology.orchestra.run.vm01.stdout:Wrote keyring to /etc/ceph/ceph.client.admin.keyring 2026-03-07T10:13:50.832 INFO:teuthology.orchestra.run.vm01.stdout:Creating mgr... 2026-03-07T10:13:50.832 INFO:teuthology.orchestra.run.vm01.stdout:Verifying port 0.0.0.0:9283 ... 2026-03-07T10:13:50.832 INFO:teuthology.orchestra.run.vm01.stdout:Verifying port 0.0.0.0:8765 ... 2026-03-07T10:13:50.898 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: monmap epoch 1 2026-03-07T10:13:50.898 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: fsid 3fd6e214-1a0e-11f1-b256-99cfc35f3328 2026-03-07T10:13:50.898 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: last_changed 2026-03-07T10:13:48.241132+0000 2026-03-07T10:13:50.898 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: created 2026-03-07T10:13:48.241132+0000 2026-03-07T10:13:50.898 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: min_mon_release 19 (squid) 2026-03-07T10:13:50.898 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: election_strategy: 1 2026-03-07T10:13:50.899 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: 0: [v2:192.168.123.101:3300/0,v1:192.168.123.101:6789/0] mon.a 2026-03-07T10:13:50.899 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: fsmap 2026-03-07T10:13:50.899 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: osdmap e1: 0 total, 0 up, 0 in 2026-03-07T10:13:50.899 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:50 vm01 ceph-mon[49602]: mgrmap e1: no daemons active 2026-03-07T10:13:50.970 INFO:teuthology.orchestra.run.vm01.stdout:Non-zero exit code 1 from systemctl reset-failed ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328@mgr.a 2026-03-07T10:13:50.970 INFO:teuthology.orchestra.run.vm01.stdout:systemctl: stderr Failed to reset failed state of unit ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328@mgr.a.service: Unit ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328@mgr.a.service not loaded. 2026-03-07T10:13:51.094 INFO:teuthology.orchestra.run.vm01.stdout:systemctl: stderr Created symlink /etc/systemd/system/ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328.target.wants/ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328@mgr.a.service → /etc/systemd/system/ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328@.service. 2026-03-07T10:13:51.251 INFO:teuthology.orchestra.run.vm01.stdout:firewalld does not appear to be present 2026-03-07T10:13:51.251 INFO:teuthology.orchestra.run.vm01.stdout:Not possible to enable service . firewalld.service is not available 2026-03-07T10:13:51.251 INFO:teuthology.orchestra.run.vm01.stdout:firewalld does not appear to be present 2026-03-07T10:13:51.251 INFO:teuthology.orchestra.run.vm01.stdout:Not possible to open ports <[9283, 8765]>. firewalld.service is not available 2026-03-07T10:13:51.251 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for mgr to start... 2026-03-07T10:13:51.251 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for mgr... 
2026-03-07T10:13:51.546 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout 2026-03-07T10:13:51.546 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout { 2026-03-07T10:13:51.546 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "fsid": "3fd6e214-1a0e-11f1-b256-99cfc35f3328", 2026-03-07T10:13:51.546 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "health": { 2026-03-07T10:13:51.546 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "status": "HEALTH_OK", 2026-03-07T10:13:51.546 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "checks": {}, 2026-03-07T10:13:51.546 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "mutes": [] 2026-03-07T10:13:51.546 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-03-07T10:13:51.546 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "election_epoch": 5, 2026-03-07T10:13:51.546 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "quorum": [ 2026-03-07T10:13:51.546 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout 0 2026-03-07T10:13:51.546 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout ], 2026-03-07T10:13:51.546 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "quorum_names": [ 2026-03-07T10:13:51.546 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "a" 2026-03-07T10:13:51.546 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout ], 2026-03-07T10:13:51.546 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "quorum_age": 0, 2026-03-07T10:13:51.546 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "monmap": { 2026-03-07T10:13:51.546 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-07T10:13:51.546 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "min_mon_release_name": "squid", 2026-03-07T10:13:51.546 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_mons": 1 2026-03-07T10:13:51.546 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-03-07T10:13:51.546 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "osdmap": { 2026-03-07T10:13:51.546 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-07T10:13:51.546 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_osds": 0, 2026-03-07T10:13:51.546 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_up_osds": 0, 2026-03-07T10:13:51.546 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "osd_up_since": 0, 2026-03-07T10:13:51.546 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_in_osds": 0, 2026-03-07T10:13:51.546 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "osd_in_since": 0, 2026-03-07T10:13:51.546 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_remapped_pgs": 0 2026-03-07T10:13:51.546 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-03-07T10:13:51.546 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "pgmap": { 2026-03-07T10:13:51.546 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "pgs_by_state": [], 2026-03-07T10:13:51.546 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_pgs": 0, 2026-03-07T10:13:51.547 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_pools": 0, 2026-03-07T10:13:51.547 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout 
"num_objects": 0, 2026-03-07T10:13:51.547 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "data_bytes": 0, 2026-03-07T10:13:51.547 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "bytes_used": 0, 2026-03-07T10:13:51.547 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "bytes_avail": 0, 2026-03-07T10:13:51.547 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "bytes_total": 0 2026-03-07T10:13:51.547 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-03-07T10:13:51.547 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "fsmap": { 2026-03-07T10:13:51.547 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-07T10:13:51.547 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "btime": "2026-03-07T10:13:49:200453+0000", 2026-03-07T10:13:51.547 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "by_rank": [], 2026-03-07T10:13:51.547 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "up:standby": 0 2026-03-07T10:13:51.547 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-03-07T10:13:51.547 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "mgrmap": { 2026-03-07T10:13:51.547 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "available": false, 2026-03-07T10:13:51.547 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_standbys": 0, 2026-03-07T10:13:51.547 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "modules": [ 2026-03-07T10:13:51.547 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "iostat", 2026-03-07T10:13:51.547 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "nfs", 2026-03-07T10:13:51.547 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "restful" 2026-03-07T10:13:51.547 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout ], 2026-03-07T10:13:51.547 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "services": {} 2026-03-07T10:13:51.547 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-03-07T10:13:51.547 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "servicemap": { 2026-03-07T10:13:51.547 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-07T10:13:51.547 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "modified": "2026-03-07T10:13:49.201013+0000", 2026-03-07T10:13:51.547 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "services": {} 2026-03-07T10:13:51.547 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-03-07T10:13:51.547 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "progress_events": {} 2026-03-07T10:13:51.547 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout } 2026-03-07T10:13:51.548 INFO:teuthology.orchestra.run.vm01.stdout:mgr not available, waiting (1/15)... 2026-03-07T10:13:51.804 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:51 vm01 ceph-mon[49602]: from='client.? 192.168.123.101:0/2682140876' entity='client.admin' 2026-03-07T10:13:51.804 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:13:51 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: 2026-03-07T10:13:51.627+0000 7fd646a62100 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member 2026-03-07T10:13:52.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:51 vm01 ceph-mon[49602]: from='client.? 
192.168.123.101:0/3223506923' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch 2026-03-07T10:13:53.224 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:13:52 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: 2026-03-07T10:13:52.754+0000 7fd646a62100 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-07T10:13:53.875 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:13:53 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: 2026-03-07T10:13:53.692+0000 7fd646a62100 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member 2026-03-07T10:13:53.875 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:13:53 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: 2026-03-07T10:13:53.817+0000 7fd646a62100 -1 mgr[py] Module influx has missing NOTIFY_TYPES member 2026-03-07T10:13:53.909 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout 2026-03-07T10:13:53.909 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout { 2026-03-07T10:13:53.909 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "fsid": "3fd6e214-1a0e-11f1-b256-99cfc35f3328", 2026-03-07T10:13:53.909 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "health": { 2026-03-07T10:13:53.909 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "status": "HEALTH_OK", 2026-03-07T10:13:53.909 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "checks": {}, 2026-03-07T10:13:53.909 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "mutes": [] 2026-03-07T10:13:53.909 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-03-07T10:13:53.909 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "election_epoch": 5, 2026-03-07T10:13:53.909 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "quorum": [ 2026-03-07T10:13:53.909 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout 0 2026-03-07T10:13:53.909 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout ], 2026-03-07T10:13:53.910 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "quorum_names": [ 2026-03-07T10:13:53.910 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "a" 2026-03-07T10:13:53.910 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout ], 2026-03-07T10:13:53.910 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "quorum_age": 3, 2026-03-07T10:13:53.910 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "monmap": { 2026-03-07T10:13:53.910 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-07T10:13:53.910 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "min_mon_release_name": "squid", 2026-03-07T10:13:53.910 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_mons": 1 2026-03-07T10:13:53.910 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-03-07T10:13:53.910 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "osdmap": { 2026-03-07T10:13:53.910 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-07T10:13:53.910 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_osds": 0, 2026-03-07T10:13:53.910 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_up_osds": 0, 2026-03-07T10:13:53.910 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "osd_up_since": 0, 2026-03-07T10:13:53.910 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: 
stdout "num_in_osds": 0, 2026-03-07T10:13:53.910 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "osd_in_since": 0, 2026-03-07T10:13:53.910 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_remapped_pgs": 0 2026-03-07T10:13:53.911 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-03-07T10:13:53.911 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "pgmap": { 2026-03-07T10:13:53.911 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "pgs_by_state": [], 2026-03-07T10:13:53.911 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_pgs": 0, 2026-03-07T10:13:53.911 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_pools": 0, 2026-03-07T10:13:53.911 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_objects": 0, 2026-03-07T10:13:53.911 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "data_bytes": 0, 2026-03-07T10:13:53.911 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "bytes_used": 0, 2026-03-07T10:13:53.911 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "bytes_avail": 0, 2026-03-07T10:13:53.911 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "bytes_total": 0 2026-03-07T10:13:53.911 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-03-07T10:13:53.911 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "fsmap": { 2026-03-07T10:13:53.911 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-07T10:13:53.911 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "btime": "2026-03-07T10:13:49:200453+0000", 2026-03-07T10:13:53.911 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "by_rank": [], 2026-03-07T10:13:53.911 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "up:standby": 0 2026-03-07T10:13:53.911 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-03-07T10:13:53.911 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "mgrmap": { 2026-03-07T10:13:53.911 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "available": false, 2026-03-07T10:13:53.911 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_standbys": 0, 2026-03-07T10:13:53.911 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "modules": [ 2026-03-07T10:13:53.911 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "iostat", 2026-03-07T10:13:53.911 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "nfs", 2026-03-07T10:13:53.911 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "restful" 2026-03-07T10:13:53.911 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout ], 2026-03-07T10:13:53.911 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "services": {} 2026-03-07T10:13:53.911 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-03-07T10:13:53.911 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "servicemap": { 2026-03-07T10:13:53.911 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-07T10:13:53.911 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "modified": "2026-03-07T10:13:49.201013+0000", 2026-03-07T10:13:53.911 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "services": {} 2026-03-07T10:13:53.911 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-03-07T10:13:53.911 
INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "progress_events": {} 2026-03-07T10:13:53.911 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout } 2026-03-07T10:13:53.911 INFO:teuthology.orchestra.run.vm01.stdout:mgr not available, waiting (2/15)... 2026-03-07T10:13:54.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:53 vm01 ceph-mon[49602]: from='client.? 192.168.123.101:0/4050763850' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch 2026-03-07T10:13:54.224 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:13:54 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: 2026-03-07T10:13:54.086+0000 7fd646a62100 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member 2026-03-07T10:13:56.169 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:13:55 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: 2026-03-07T10:13:55.881+0000 7fd646a62100 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-07T10:13:56.202 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout 2026-03-07T10:13:56.202 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout { 2026-03-07T10:13:56.203 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "fsid": "3fd6e214-1a0e-11f1-b256-99cfc35f3328", 2026-03-07T10:13:56.203 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "health": { 2026-03-07T10:13:56.203 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "status": "HEALTH_OK", 2026-03-07T10:13:56.203 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "checks": {}, 2026-03-07T10:13:56.203 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "mutes": [] 2026-03-07T10:13:56.203 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-03-07T10:13:56.203 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "election_epoch": 5, 2026-03-07T10:13:56.203 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "quorum": [ 2026-03-07T10:13:56.203 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout 0 2026-03-07T10:13:56.203 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout ], 2026-03-07T10:13:56.203 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "quorum_names": [ 2026-03-07T10:13:56.203 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "a" 2026-03-07T10:13:56.203 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout ], 2026-03-07T10:13:56.203 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "quorum_age": 5, 2026-03-07T10:13:56.203 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "monmap": { 2026-03-07T10:13:56.203 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-07T10:13:56.203 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "min_mon_release_name": "squid", 2026-03-07T10:13:56.203 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_mons": 1 2026-03-07T10:13:56.203 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-03-07T10:13:56.203 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "osdmap": { 2026-03-07T10:13:56.203 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-07T10:13:56.203 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_osds": 0, 2026-03-07T10:13:56.203 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_up_osds": 0, 2026-03-07T10:13:56.203 
INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "osd_up_since": 0, 2026-03-07T10:13:56.203 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_in_osds": 0, 2026-03-07T10:13:56.203 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "osd_in_since": 0, 2026-03-07T10:13:56.203 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_remapped_pgs": 0 2026-03-07T10:13:56.204 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-03-07T10:13:56.204 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "pgmap": { 2026-03-07T10:13:56.204 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "pgs_by_state": [], 2026-03-07T10:13:56.204 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_pgs": 0, 2026-03-07T10:13:56.204 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_pools": 0, 2026-03-07T10:13:56.204 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_objects": 0, 2026-03-07T10:13:56.204 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "data_bytes": 0, 2026-03-07T10:13:56.204 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "bytes_used": 0, 2026-03-07T10:13:56.204 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "bytes_avail": 0, 2026-03-07T10:13:56.204 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "bytes_total": 0 2026-03-07T10:13:56.204 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-03-07T10:13:56.204 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "fsmap": { 2026-03-07T10:13:56.204 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-07T10:13:56.204 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "btime": "2026-03-07T10:13:49:200453+0000", 2026-03-07T10:13:56.204 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "by_rank": [], 2026-03-07T10:13:56.204 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "up:standby": 0 2026-03-07T10:13:56.204 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-03-07T10:13:56.204 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "mgrmap": { 2026-03-07T10:13:56.204 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "available": false, 2026-03-07T10:13:56.204 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_standbys": 0, 2026-03-07T10:13:56.204 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "modules": [ 2026-03-07T10:13:56.204 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "iostat", 2026-03-07T10:13:56.204 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "nfs", 2026-03-07T10:13:56.204 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "restful" 2026-03-07T10:13:56.204 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout ], 2026-03-07T10:13:56.204 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "services": {} 2026-03-07T10:13:56.204 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-03-07T10:13:56.204 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "servicemap": { 2026-03-07T10:13:56.204 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-07T10:13:56.204 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "modified": "2026-03-07T10:13:49.201013+0000", 2026-03-07T10:13:56.204 
INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "services": {} 2026-03-07T10:13:56.204 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-03-07T10:13:56.204 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "progress_events": {} 2026-03-07T10:13:56.204 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout } 2026-03-07T10:13:56.204 INFO:teuthology.orchestra.run.vm01.stdout:mgr not available, waiting (3/15)... 2026-03-07T10:13:56.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:56 vm01 ceph-mon[49602]: from='client.? 192.168.123.101:0/1167621262' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch 2026-03-07T10:13:56.474 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:13:56 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: 2026-03-07T10:13:56.240+0000 7fd646a62100 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-07T10:13:56.474 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:13:56 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: 2026-03-07T10:13:56.369+0000 7fd646a62100 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 2026-03-07T10:13:56.739 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:13:56 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: 2026-03-07T10:13:56.488+0000 7fd646a62100 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-07T10:13:56.739 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:13:56 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: 2026-03-07T10:13:56.621+0000 7fd646a62100 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 2026-03-07T10:13:56.739 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:13:56 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: 2026-03-07T10:13:56.738+0000 7fd646a62100 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-07T10:13:57.724 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:13:57 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: 2026-03-07T10:13:57.242+0000 7fd646a62100 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member 2026-03-07T10:13:57.724 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:13:57 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: 2026-03-07T10:13:57.392+0000 7fd646a62100 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-07T10:13:58.474 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:13:58 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: 2026-03-07T10:13:58.095+0000 7fd646a62100 -1 mgr[py] Module rgw has missing NOTIFY_TYPES member 2026-03-07T10:13:58.512 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout 2026-03-07T10:13:58.513 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout { 2026-03-07T10:13:58.513 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "fsid": "3fd6e214-1a0e-11f1-b256-99cfc35f3328", 2026-03-07T10:13:58.513 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "health": { 2026-03-07T10:13:58.513 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "status": "HEALTH_OK", 2026-03-07T10:13:58.513 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "checks": {}, 2026-03-07T10:13:58.513 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "mutes": [] 2026-03-07T10:13:58.513 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-03-07T10:13:58.513 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout 
"election_epoch": 5, 2026-03-07T10:13:58.513 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "quorum": [ 2026-03-07T10:13:58.513 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout 0 2026-03-07T10:13:58.513 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout ], 2026-03-07T10:13:58.513 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "quorum_names": [ 2026-03-07T10:13:58.513 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "a" 2026-03-07T10:13:58.513 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout ], 2026-03-07T10:13:58.513 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "quorum_age": 7, 2026-03-07T10:13:58.513 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "monmap": { 2026-03-07T10:13:58.513 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-07T10:13:58.513 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "min_mon_release_name": "squid", 2026-03-07T10:13:58.513 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_mons": 1 2026-03-07T10:13:58.513 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-03-07T10:13:58.513 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "osdmap": { 2026-03-07T10:13:58.513 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-07T10:13:58.513 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_osds": 0, 2026-03-07T10:13:58.513 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_up_osds": 0, 2026-03-07T10:13:58.513 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "osd_up_since": 0, 2026-03-07T10:13:58.513 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_in_osds": 0, 2026-03-07T10:13:58.513 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "osd_in_since": 0, 2026-03-07T10:13:58.513 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_remapped_pgs": 0 2026-03-07T10:13:58.513 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-03-07T10:13:58.513 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "pgmap": { 2026-03-07T10:13:58.513 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "pgs_by_state": [], 2026-03-07T10:13:58.513 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_pgs": 0, 2026-03-07T10:13:58.513 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_pools": 0, 2026-03-07T10:13:58.513 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_objects": 0, 2026-03-07T10:13:58.513 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "data_bytes": 0, 2026-03-07T10:13:58.514 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "bytes_used": 0, 2026-03-07T10:13:58.514 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "bytes_avail": 0, 2026-03-07T10:13:58.514 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "bytes_total": 0 2026-03-07T10:13:58.514 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-03-07T10:13:58.514 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "fsmap": { 2026-03-07T10:13:58.514 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-07T10:13:58.514 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "btime": "2026-03-07T10:13:49:200453+0000", 2026-03-07T10:13:58.514 
INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "by_rank": [], 2026-03-07T10:13:58.514 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "up:standby": 0 2026-03-07T10:13:58.514 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-03-07T10:13:58.514 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "mgrmap": { 2026-03-07T10:13:58.514 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "available": false, 2026-03-07T10:13:58.514 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_standbys": 0, 2026-03-07T10:13:58.514 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "modules": [ 2026-03-07T10:13:58.514 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "iostat", 2026-03-07T10:13:58.514 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "nfs", 2026-03-07T10:13:58.515 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "restful" 2026-03-07T10:13:58.515 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout ], 2026-03-07T10:13:58.515 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "services": {} 2026-03-07T10:13:58.515 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-03-07T10:13:58.515 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "servicemap": { 2026-03-07T10:13:58.515 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-07T10:13:58.515 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "modified": "2026-03-07T10:13:49.201013+0000", 2026-03-07T10:13:58.515 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "services": {} 2026-03-07T10:13:58.515 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-03-07T10:13:58.515 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "progress_events": {} 2026-03-07T10:13:58.515 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout } 2026-03-07T10:13:58.515 INFO:teuthology.orchestra.run.vm01.stdout:mgr not available, waiting (4/15)... 2026-03-07T10:13:58.724 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:13:58 vm01 ceph-mon[49602]: from='client.? 
192.168.123.101:0/1812127146' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch 2026-03-07T10:13:59.474 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:13:59 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: 2026-03-07T10:13:59.162+0000 7fd646a62100 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-07T10:13:59.474 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:13:59 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: 2026-03-07T10:13:59.286+0000 7fd646a62100 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member 2026-03-07T10:13:59.474 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:13:59 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: 2026-03-07T10:13:59.410+0000 7fd646a62100 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-07T10:13:59.974 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:13:59 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: 2026-03-07T10:13:59.676+0000 7fd646a62100 -1 mgr[py] Module status has missing NOTIFY_TYPES member 2026-03-07T10:13:59.974 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:13:59 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: 2026-03-07T10:13:59.797+0000 7fd646a62100 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 2026-03-07T10:14:00.432 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:14:00 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: 2026-03-07T10:14:00.101+0000 7fd646a62100 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member 2026-03-07T10:14:00.724 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:14:00 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: 2026-03-07T10:14:00.431+0000 7fd646a62100 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member 2026-03-07T10:14:00.796 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout 2026-03-07T10:14:00.796 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout { 2026-03-07T10:14:00.796 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "fsid": "3fd6e214-1a0e-11f1-b256-99cfc35f3328", 2026-03-07T10:14:00.796 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "health": { 2026-03-07T10:14:00.796 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "status": "HEALTH_OK", 2026-03-07T10:14:00.796 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "checks": {}, 2026-03-07T10:14:00.796 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "mutes": [] 2026-03-07T10:14:00.796 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-03-07T10:14:00.796 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "election_epoch": 5, 2026-03-07T10:14:00.796 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "quorum": [ 2026-03-07T10:14:00.796 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout 0 2026-03-07T10:14:00.796 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout ], 2026-03-07T10:14:00.796 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "quorum_names": [ 2026-03-07T10:14:00.796 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "a" 2026-03-07T10:14:00.796 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout ], 2026-03-07T10:14:00.796 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "quorum_age": 10, 2026-03-07T10:14:00.796 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "monmap": { 2026-03-07T10:14:00.796 
INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-07T10:14:00.796 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "min_mon_release_name": "squid", 2026-03-07T10:14:00.796 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_mons": 1 2026-03-07T10:14:00.796 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-03-07T10:14:00.796 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "osdmap": { 2026-03-07T10:14:00.796 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-07T10:14:00.796 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_osds": 0, 2026-03-07T10:14:00.796 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_up_osds": 0, 2026-03-07T10:14:00.796 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "osd_up_since": 0, 2026-03-07T10:14:00.796 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_in_osds": 0, 2026-03-07T10:14:00.796 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "osd_in_since": 0, 2026-03-07T10:14:00.796 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_remapped_pgs": 0 2026-03-07T10:14:00.797 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-03-07T10:14:00.797 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "pgmap": { 2026-03-07T10:14:00.797 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "pgs_by_state": [], 2026-03-07T10:14:00.797 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_pgs": 0, 2026-03-07T10:14:00.797 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_pools": 0, 2026-03-07T10:14:00.797 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_objects": 0, 2026-03-07T10:14:00.797 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "data_bytes": 0, 2026-03-07T10:14:00.797 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "bytes_used": 0, 2026-03-07T10:14:00.797 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "bytes_avail": 0, 2026-03-07T10:14:00.797 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "bytes_total": 0 2026-03-07T10:14:00.797 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-03-07T10:14:00.797 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "fsmap": { 2026-03-07T10:14:00.797 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-07T10:14:00.797 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "btime": "2026-03-07T10:13:49:200453+0000", 2026-03-07T10:14:00.797 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "by_rank": [], 2026-03-07T10:14:00.797 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "up:standby": 0 2026-03-07T10:14:00.797 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-03-07T10:14:00.797 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "mgrmap": { 2026-03-07T10:14:00.797 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "available": false, 2026-03-07T10:14:00.797 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_standbys": 0, 2026-03-07T10:14:00.797 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "modules": [ 2026-03-07T10:14:00.797 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "iostat", 2026-03-07T10:14:00.797 
INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "nfs", 2026-03-07T10:14:00.797 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "restful" 2026-03-07T10:14:00.797 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout ], 2026-03-07T10:14:00.797 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "services": {} 2026-03-07T10:14:00.797 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-03-07T10:14:00.797 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "servicemap": { 2026-03-07T10:14:00.797 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-07T10:14:00.797 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "modified": "2026-03-07T10:13:49.201013+0000", 2026-03-07T10:14:00.797 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "services": {} 2026-03-07T10:14:00.797 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-03-07T10:14:00.797 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "progress_events": {} 2026-03-07T10:14:00.797 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout } 2026-03-07T10:14:00.797 INFO:teuthology.orchestra.run.vm01.stdout:mgr not available, waiting (5/15)... 2026-03-07T10:14:01.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:00 vm01 ceph-mon[49602]: from='client.? 192.168.123.101:0/1825768623' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch 2026-03-07T10:14:01.224 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:14:00 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: 2026-03-07T10:14:00.812+0000 7fd646a62100 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-07T10:14:01.224 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:14:00 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: 2026-03-07T10:14:00.927+0000 7fd646a62100 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-07T10:14:02.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:01 vm01 ceph-mon[49602]: Activating manager daemon a 2026-03-07T10:14:02.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:01 vm01 ceph-mon[49602]: mgrmap e2: a(active, starting, since 0.00399133s) 2026-03-07T10:14:02.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:01 vm01 ceph-mon[49602]: from='mgr.14100 192.168.123.101:0/3671220991' entity='mgr.a' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-07T10:14:02.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:01 vm01 ceph-mon[49602]: from='mgr.14100 192.168.123.101:0/3671220991' entity='mgr.a' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-07T10:14:02.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:01 vm01 ceph-mon[49602]: from='mgr.14100 192.168.123.101:0/3671220991' entity='mgr.a' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-07T10:14:02.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:01 vm01 ceph-mon[49602]: from='mgr.14100 192.168.123.101:0/3671220991' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-07T10:14:02.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:01 vm01 ceph-mon[49602]: from='mgr.14100 192.168.123.101:0/3671220991' entity='mgr.a' cmd=[{"prefix": "mgr metadata", "who": "a", "id": "a"}]: dispatch 2026-03-07T10:14:02.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:01 vm01 ceph-mon[49602]: Manager daemon a is now available 2026-03-07T10:14:02.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:01 
vm01 ceph-mon[49602]: from='mgr.14100 192.168.123.101:0/3671220991' entity='mgr.a' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/a/mirror_snapshot_schedule"}]: dispatch 2026-03-07T10:14:02.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:01 vm01 ceph-mon[49602]: from='mgr.14100 192.168.123.101:0/3671220991' entity='mgr.a' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/a/trash_purge_schedule"}]: dispatch 2026-03-07T10:14:02.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:01 vm01 ceph-mon[49602]: from='mgr.14100 192.168.123.101:0/3671220991' entity='mgr.a' 2026-03-07T10:14:02.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:01 vm01 ceph-mon[49602]: from='mgr.14100 192.168.123.101:0/3671220991' entity='mgr.a' 2026-03-07T10:14:02.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:01 vm01 ceph-mon[49602]: from='mgr.14100 192.168.123.101:0/3671220991' entity='mgr.a' 2026-03-07T10:14:03.128 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:02 vm01 ceph-mon[49602]: mgrmap e3: a(active, since 1.01035s) 2026-03-07T10:14:03.173 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout 2026-03-07T10:14:03.173 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout { 2026-03-07T10:14:03.173 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "fsid": "3fd6e214-1a0e-11f1-b256-99cfc35f3328", 2026-03-07T10:14:03.173 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "health": { 2026-03-07T10:14:03.173 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "status": "HEALTH_OK", 2026-03-07T10:14:03.173 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "checks": {}, 2026-03-07T10:14:03.173 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "mutes": [] 2026-03-07T10:14:03.173 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-03-07T10:14:03.173 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "election_epoch": 5, 2026-03-07T10:14:03.174 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "quorum": [ 2026-03-07T10:14:03.174 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout 0 2026-03-07T10:14:03.174 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout ], 2026-03-07T10:14:03.174 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "quorum_names": [ 2026-03-07T10:14:03.174 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "a" 2026-03-07T10:14:03.174 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout ], 2026-03-07T10:14:03.174 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "quorum_age": 12, 2026-03-07T10:14:03.174 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "monmap": { 2026-03-07T10:14:03.174 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-07T10:14:03.174 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "min_mon_release_name": "squid", 2026-03-07T10:14:03.174 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_mons": 1 2026-03-07T10:14:03.174 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-03-07T10:14:03.174 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "osdmap": { 2026-03-07T10:14:03.174 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-07T10:14:03.174 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_osds": 0, 2026-03-07T10:14:03.174 
INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_up_osds": 0, 2026-03-07T10:14:03.174 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "osd_up_since": 0, 2026-03-07T10:14:03.174 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_in_osds": 0, 2026-03-07T10:14:03.174 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "osd_in_since": 0, 2026-03-07T10:14:03.174 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_remapped_pgs": 0 2026-03-07T10:14:03.174 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-03-07T10:14:03.174 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "pgmap": { 2026-03-07T10:14:03.174 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "pgs_by_state": [], 2026-03-07T10:14:03.174 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_pgs": 0, 2026-03-07T10:14:03.174 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_pools": 0, 2026-03-07T10:14:03.174 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_objects": 0, 2026-03-07T10:14:03.174 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "data_bytes": 0, 2026-03-07T10:14:03.174 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "bytes_used": 0, 2026-03-07T10:14:03.174 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "bytes_avail": 0, 2026-03-07T10:14:03.174 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "bytes_total": 0 2026-03-07T10:14:03.174 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-03-07T10:14:03.174 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "fsmap": { 2026-03-07T10:14:03.174 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-07T10:14:03.174 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "btime": "2026-03-07T10:13:49:200453+0000", 2026-03-07T10:14:03.174 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "by_rank": [], 2026-03-07T10:14:03.174 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "up:standby": 0 2026-03-07T10:14:03.174 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-03-07T10:14:03.174 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "mgrmap": { 2026-03-07T10:14:03.174 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "available": true, 2026-03-07T10:14:03.174 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_standbys": 0, 2026-03-07T10:14:03.174 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "modules": [ 2026-03-07T10:14:03.174 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "iostat", 2026-03-07T10:14:03.174 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "nfs", 2026-03-07T10:14:03.174 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "restful" 2026-03-07T10:14:03.174 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout ], 2026-03-07T10:14:03.174 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "services": {} 2026-03-07T10:14:03.174 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-03-07T10:14:03.174 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "servicemap": { 2026-03-07T10:14:03.174 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-07T10:14:03.174 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout 
"modified": "2026-03-07T10:13:49.201013+0000", 2026-03-07T10:14:03.174 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "services": {} 2026-03-07T10:14:03.174 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout }, 2026-03-07T10:14:03.174 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "progress_events": {} 2026-03-07T10:14:03.174 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout } 2026-03-07T10:14:03.174 INFO:teuthology.orchestra.run.vm01.stdout:mgr is available 2026-03-07T10:14:03.518 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout 2026-03-07T10:14:03.518 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout [global] 2026-03-07T10:14:03.518 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout fsid = 3fd6e214-1a0e-11f1-b256-99cfc35f3328 2026-03-07T10:14:03.518 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout mon_cluster_log_file_level = debug 2026-03-07T10:14:03.518 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout mon_host = [v2:192.168.123.101:3300,v1:192.168.123.101:6789] 2026-03-07T10:14:03.518 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout mon_osd_allow_pg_remap = true 2026-03-07T10:14:03.518 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout mon_osd_allow_primary_affinity = true 2026-03-07T10:14:03.518 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout mon_warn_on_no_sortbitwise = false 2026-03-07T10:14:03.518 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout osd_crush_chooseleaf_type = 0 2026-03-07T10:14:03.518 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout 2026-03-07T10:14:03.518 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout [mgr] 2026-03-07T10:14:03.518 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout mgr/telemetry/nag = false 2026-03-07T10:14:03.518 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout 2026-03-07T10:14:03.518 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout [osd] 2026-03-07T10:14:03.518 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout osd_map_max_advance = 10 2026-03-07T10:14:03.518 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout osd_sloppy_crc = true 2026-03-07T10:14:03.518 INFO:teuthology.orchestra.run.vm01.stdout:Enabling cephadm module... 2026-03-07T10:14:04.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:03 vm01 ceph-mon[49602]: mgrmap e4: a(active, since 2s) 2026-03-07T10:14:04.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:03 vm01 ceph-mon[49602]: from='client.? 192.168.123.101:0/3072353561' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch 2026-03-07T10:14:04.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:03 vm01 ceph-mon[49602]: from='client.? 192.168.123.101:0/527461182' entity='client.admin' cmd=[{"prefix": "config assimilate-conf"}]: dispatch 2026-03-07T10:14:04.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:03 vm01 ceph-mon[49602]: from='client.? 192.168.123.101:0/527461182' entity='client.admin' cmd='[{"prefix": "config assimilate-conf"}]': finished 2026-03-07T10:14:04.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:03 vm01 ceph-mon[49602]: from='client.? 
192.168.123.101:0/2063341138' entity='client.admin' cmd=[{"prefix": "mgr module enable", "module": "cephadm"}]: dispatch 2026-03-07T10:14:04.776 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:14:04 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: ignoring --setuser ceph since I am not root 2026-03-07T10:14:04.776 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:14:04 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: ignoring --setgroup ceph since I am not root 2026-03-07T10:14:04.926 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout { 2026-03-07T10:14:04.926 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "epoch": 5, 2026-03-07T10:14:04.926 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "available": true, 2026-03-07T10:14:04.926 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "active_name": "a", 2026-03-07T10:14:04.926 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_standby": 0 2026-03-07T10:14:04.926 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout } 2026-03-07T10:14:04.926 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for the mgr to restart... 2026-03-07T10:14:04.926 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for mgr epoch 5... 2026-03-07T10:14:05.047 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:14:04 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: 2026-03-07T10:14:04.775+0000 7fdc98b70100 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member 2026-03-07T10:14:05.048 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:14:04 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: 2026-03-07T10:14:04.906+0000 7fdc98b70100 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member 2026-03-07T10:14:05.974 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:05 vm01 ceph-mon[49602]: from='client.? 192.168.123.101:0/2063341138' entity='client.admin' cmd='[{"prefix": "mgr module enable", "module": "cephadm"}]': finished 2026-03-07T10:14:05.974 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:05 vm01 ceph-mon[49602]: mgrmap e5: a(active, since 3s) 2026-03-07T10:14:05.974 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:05 vm01 ceph-mon[49602]: from='client.? 
192.168.123.101:0/615134430' entity='client.admin' cmd=[{"prefix": "mgr stat"}]: dispatch 2026-03-07T10:14:06.474 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:14:06 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: 2026-03-07T10:14:06.023+0000 7fdc98b70100 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-07T10:14:07.181 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:14:06 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: 2026-03-07T10:14:06.837+0000 7fdc98b70100 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member 2026-03-07T10:14:07.181 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:14:06 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: 2026-03-07T10:14:06.951+0000 7fdc98b70100 -1 mgr[py] Module influx has missing NOTIFY_TYPES member 2026-03-07T10:14:07.181 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:14:07 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: 2026-03-07T10:14:07.180+0000 7fdc98b70100 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member 2026-03-07T10:14:09.224 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:14:08 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: 2026-03-07T10:14:08.912+0000 7fdc98b70100 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-07T10:14:09.630 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:14:09 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: 2026-03-07T10:14:09.248+0000 7fdc98b70100 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-07T10:14:09.630 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:14:09 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: 2026-03-07T10:14:09.379+0000 7fdc98b70100 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 2026-03-07T10:14:09.630 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:14:09 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: 2026-03-07T10:14:09.496+0000 7fdc98b70100 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-07T10:14:09.974 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:14:09 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: 2026-03-07T10:14:09.629+0000 7fdc98b70100 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 2026-03-07T10:14:09.974 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:14:09 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: 2026-03-07T10:14:09.745+0000 7fdc98b70100 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-07T10:14:10.724 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:14:10 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: 2026-03-07T10:14:10.239+0000 7fdc98b70100 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member 2026-03-07T10:14:10.724 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:14:10 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: 2026-03-07T10:14:10.391+0000 7fdc98b70100 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-07T10:14:11.474 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:14:11 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: 2026-03-07T10:14:11.093+0000 7fdc98b70100 -1 mgr[py] Module rgw has missing NOTIFY_TYPES member 2026-03-07T10:14:12.474 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:14:12 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: 2026-03-07T10:14:12.091+0000 7fdc98b70100 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-07T10:14:12.474 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:14:12 vm01 
ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: 2026-03-07T10:14:12.208+0000 7fdc98b70100 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member 2026-03-07T10:14:12.474 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:14:12 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: 2026-03-07T10:14:12.333+0000 7fdc98b70100 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-07T10:14:12.974 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:14:12 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: 2026-03-07T10:14:12.588+0000 7fdc98b70100 -1 mgr[py] Module status has missing NOTIFY_TYPES member 2026-03-07T10:14:12.974 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:14:12 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: 2026-03-07T10:14:12.704+0000 7fdc98b70100 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 2026-03-07T10:14:13.309 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:14:12 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: 2026-03-07T10:14:12.988+0000 7fdc98b70100 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member 2026-03-07T10:14:13.662 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:14:13 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: 2026-03-07T10:14:13.308+0000 7fdc98b70100 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member 2026-03-07T10:14:13.974 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:13 vm01 ceph-mon[49602]: Active manager daemon a restarted 2026-03-07T10:14:13.974 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:13 vm01 ceph-mon[49602]: Activating manager daemon a 2026-03-07T10:14:13.974 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:13 vm01 ceph-mon[49602]: osdmap e2: 0 total, 0 up, 0 in 2026-03-07T10:14:13.974 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:13 vm01 ceph-mon[49602]: mgrmap e6: a(active, starting, since 0.0215224s) 2026-03-07T10:14:13.974 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:13 vm01 ceph-mon[49602]: from='mgr.14124 192.168.123.101:0/2521618598' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-07T10:14:13.974 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:13 vm01 ceph-mon[49602]: from='mgr.14124 192.168.123.101:0/2521618598' entity='mgr.a' cmd=[{"prefix": "mgr metadata", "who": "a", "id": "a"}]: dispatch 2026-03-07T10:14:13.974 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:13 vm01 ceph-mon[49602]: from='mgr.14124 192.168.123.101:0/2521618598' entity='mgr.a' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-07T10:14:13.974 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:13 vm01 ceph-mon[49602]: from='mgr.14124 192.168.123.101:0/2521618598' entity='mgr.a' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-07T10:14:13.974 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:13 vm01 ceph-mon[49602]: from='mgr.14124 192.168.123.101:0/2521618598' entity='mgr.a' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-07T10:14:13.974 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:13 vm01 ceph-mon[49602]: Manager daemon a is now available 2026-03-07T10:14:13.974 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:13 vm01 ceph-mon[49602]: from='mgr.14124 192.168.123.101:0/2521618598' entity='mgr.a' 2026-03-07T10:14:13.974 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:13 vm01 ceph-mon[49602]: from='mgr.14124 192.168.123.101:0/2521618598' entity='mgr.a' 2026-03-07T10:14:13.974 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:13 vm01 ceph-mon[49602]: 
from='mgr.14124 192.168.123.101:0/2521618598' entity='mgr.a' 2026-03-07T10:14:13.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:13 vm01 ceph-mon[49602]: from='mgr.14124 192.168.123.101:0/2521618598' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:14:13.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:13 vm01 ceph-mon[49602]: from='mgr.14124 192.168.123.101:0/2521618598' entity='mgr.a' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/a/mirror_snapshot_schedule"}]: dispatch 2026-03-07T10:14:13.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:13 vm01 ceph-mon[49602]: from='mgr.14124 192.168.123.101:0/2521618598' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:14:13.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:13 vm01 ceph-mon[49602]: from='mgr.14124 192.168.123.101:0/2521618598' entity='mgr.a' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/a/trash_purge_schedule"}]: dispatch 2026-03-07T10:14:13.975 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:14:13 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: 2026-03-07T10:14:13.661+0000 7fdc98b70100 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-07T10:14:13.975 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:14:13 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: 2026-03-07T10:14:13.778+0000 7fdc98b70100 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-07T10:14:14.878 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout { 2026-03-07T10:14:14.878 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "mgrmap_epoch": 7, 2026-03-07T10:14:14.878 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "initialized": true 2026-03-07T10:14:14.878 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout } 2026-03-07T10:14:14.878 INFO:teuthology.orchestra.run.vm01.stdout:mgr epoch 5 is available 2026-03-07T10:14:14.878 INFO:teuthology.orchestra.run.vm01.stdout:Setting orchestrator backend to cephadm... 2026-03-07T10:14:15.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:15 vm01 ceph-mon[49602]: Found migration_current of "None". Setting to last migration. 2026-03-07T10:14:15.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:15 vm01 ceph-mon[49602]: from='mgr.14124 192.168.123.101:0/2521618598' entity='mgr.a' 2026-03-07T10:14:15.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:15 vm01 ceph-mon[49602]: from='mgr.14124 192.168.123.101:0/2521618598' entity='mgr.a' 2026-03-07T10:14:15.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:15 vm01 ceph-mon[49602]: mgrmap e7: a(active, since 1.03013s) 2026-03-07T10:14:15.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:15 vm01 ceph-mon[49602]: from='mgr.14124 192.168.123.101:0/2521618598' entity='mgr.a' 2026-03-07T10:14:15.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:15 vm01 ceph-mon[49602]: from='mgr.14124 192.168.123.101:0/2521618598' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:14:15.564 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout value unchanged 2026-03-07T10:14:15.564 INFO:teuthology.orchestra.run.vm01.stdout:Generating ssh key... 2026-03-07T10:14:16.474 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:14:16 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: Generating public/private rsa key pair. 
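The records above show why the bootstrap blocks on "Waiting for mgr epoch 5...": enabling the cephadm module makes the active mgr respawn, so the harness notes the mgrmap epoch beforehand and polls until a newer epoch is reported. A minimal sketch of that polling loop, assuming jq is available and the admin keyring is readable on the node (the actual bootstrap code may structure this differently):

    # record the current mgrmap epoch, trigger the respawn, wait for a newer epoch
    before=$(ceph mgr stat --format json | jq '.epoch')
    ceph mgr module enable cephadm       # the active mgr restarts to load the module
    until cur=$(ceph mgr stat --format json | jq '.epoch'); [ "${cur:-0}" -gt "$before" ]; do
        sleep 2
    done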
2026-03-07T10:14:16.475 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:14:16 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: Your identification has been saved in /tmp/tmpzm5azpa7/key 2026-03-07T10:14:16.475 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:14:16 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: Your public key has been saved in /tmp/tmpzm5azpa7/key.pub 2026-03-07T10:14:16.475 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:14:16 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: The key fingerprint is: 2026-03-07T10:14:16.475 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:14:16 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: SHA256:NkCC4DM3mithzTTnb3spbJsAyK3ZTyPwQdWG2SVi0v0 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328 2026-03-07T10:14:16.475 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:14:16 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: The key's randomart image is: 2026-03-07T10:14:16.475 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:14:16 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: +---[RSA 3072]----+ 2026-03-07T10:14:16.475 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:14:16 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: |.. oo+B... | 2026-03-07T10:14:16.475 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:14:16 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: |. . +*.=. | 2026-03-07T10:14:16.475 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:14:16 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: | + * .o . | 2026-03-07T10:14:16.475 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:14:16 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: |. & = . E | 2026-03-07T10:14:16.475 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:14:16 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: |.B * . S | 2026-03-07T10:14:16.475 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:14:16 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: |..B o .. . | 2026-03-07T10:14:16.475 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:14:16 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: |.+ + +.o . | 2026-03-07T10:14:16.475 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:14:16 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: |. 
+ ++oo | 2026-03-07T10:14:16.475 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:14:16 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: | ..++ | 2026-03-07T10:14:16.475 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:14:16 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: +----[SHA256]-----+ 2026-03-07T10:14:16.925 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:16 vm01 ceph-mon[49602]: from='client.14128 -' entity='client.admin' cmd=[{"prefix": "get_command_descriptions"}]: dispatch 2026-03-07T10:14:16.925 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:16 vm01 ceph-mon[49602]: from='client.14128 -' entity='client.admin' cmd=[{"prefix": "mgr_status"}]: dispatch 2026-03-07T10:14:16.926 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:16 vm01 ceph-mon[49602]: from='client.14136 -' entity='client.admin' cmd=[{"prefix": "orch set backend", "module_name": "cephadm", "target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:14:16.926 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:16 vm01 ceph-mon[49602]: [07/Mar/2026:10:14:15] ENGINE Bus STARTING 2026-03-07T10:14:16.926 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:16 vm01 ceph-mon[49602]: from='client.14138 -' entity='client.admin' cmd=[{"prefix": "cephadm set-user", "user": "root", "target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:14:16.926 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:16 vm01 ceph-mon[49602]: from='mgr.14124 192.168.123.101:0/2521618598' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:14:16.926 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:16 vm01 ceph-mon[49602]: from='mgr.14124 192.168.123.101:0/2521618598' entity='mgr.a' 2026-03-07T10:14:16.926 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:16 vm01 ceph-mon[49602]: from='mgr.14124 192.168.123.101:0/2521618598' entity='mgr.a' 2026-03-07T10:14:16.956 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDPwhclgAgrEy1qfr7KJYb742/VXFjbVr6dbSE2QSv0wFUFJg80QgKuLpf/fOMal8yg8LDh31g8oEUMoMr1Dccx+TxDWe00lZcyuD32KVzDIPpy5njNujauXPjS8wsMTVqDjn/obcifQBwPVTCsX45GdVymGbkSe9YajUZm72Rj30QnFifGVYEIrmZnEcM5//KDPtCpml3t0uklLoW1z6CcHgcFfWBM4+ZUss/wNsrshYCXiZ1hVHDopgtgk7XwLomjZhLUR5ZgMsOlhugL6iygjyk4AdQ09qOjUdcFumXnA3q4silqauz0fS1shqipKd1xKwd7e/nvzW4v/VsVvzaSnBPKBj5P7yH8XFo3XJT7ELm7arL9pvFBAJeqJyNHsOKwsrHdUsN3CqKI3F3TepdTrahc8yG2hUXK0iqcHkwgvyVToMlWeRFv3erxRqBc/RRGOiA9YA4ofSItfVH5B3tKA2ftv2LQbnV0js+KHAryDOFN01Jh0bqJM+dHExHyxJ8= ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328 2026-03-07T10:14:16.956 INFO:teuthology.orchestra.run.vm01.stdout:Wrote public SSH key to /home/ubuntu/cephtest/ceph.pub 2026-03-07T10:14:16.956 INFO:teuthology.orchestra.run.vm01.stdout:Adding key to root@localhost authorized_keys... 2026-03-07T10:14:16.959 INFO:teuthology.orchestra.run.vm01.stdout:Adding host vm01... 
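With the cluster SSH key pair generated and the public half written to /home/ubuntu/cephtest/ceph.pub, the harness enrols the first host. The mon dispatches above correspond roughly to the following CLI sequence; this is a sketch using the hostname and address from this run, not a verbatim copy of the harness code:

    ceph orch set backend cephadm             # hand orchestration to the cephadm module
    ceph cephadm set-user root                # cephadm will SSH to managed hosts as root
    ceph cephadm generate-key                 # create the cluster SSH key pair
    ceph cephadm get-pub-key > ceph.pub       # export the public key for distribution
    ceph orch host add vm01 192.168.123.101   # enrol the bootstrap host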
2026-03-07T10:14:17.871 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:17 vm01 ceph-mon[49602]: [07/Mar/2026:10:14:15] ENGINE Serving on https://192.168.123.101:7150 2026-03-07T10:14:17.871 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:17 vm01 ceph-mon[49602]: [07/Mar/2026:10:14:15] ENGINE Client ('192.168.123.101', 45768) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-07T10:14:17.871 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:17 vm01 ceph-mon[49602]: [07/Mar/2026:10:14:15] ENGINE Serving on http://192.168.123.101:8765 2026-03-07T10:14:17.871 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:17 vm01 ceph-mon[49602]: [07/Mar/2026:10:14:15] ENGINE Bus STARTED 2026-03-07T10:14:17.871 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:17 vm01 ceph-mon[49602]: from='client.14140 -' entity='client.admin' cmd=[{"prefix": "cephadm generate-key", "target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:14:17.871 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:17 vm01 ceph-mon[49602]: Generating ssh key... 2026-03-07T10:14:17.871 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:17 vm01 ceph-mon[49602]: mgrmap e8: a(active, since 2s) 2026-03-07T10:14:17.871 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:17 vm01 ceph-mon[49602]: from='client.14142 -' entity='client.admin' cmd=[{"prefix": "cephadm get-pub-key", "target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:14:17.871 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:17 vm01 ceph-mon[49602]: from='client.14144 -' entity='client.admin' cmd=[{"prefix": "orch host add", "hostname": "vm01", "addr": "192.168.123.101", "target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:14:18.974 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:18 vm01 ceph-mon[49602]: Deploying cephadm binary to vm01 2026-03-07T10:14:19.529 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout Added host 'vm01' with addr '192.168.123.101' 2026-03-07T10:14:19.529 INFO:teuthology.orchestra.run.vm01.stdout:Deploying unmanaged mon service... 2026-03-07T10:14:19.915 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout Scheduled mon update... 2026-03-07T10:14:19.916 INFO:teuthology.orchestra.run.vm01.stdout:Deploying unmanaged mgr service... 2026-03-07T10:14:20.295 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout Scheduled mgr update... 2026-03-07T10:14:20.594 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:20 vm01 ceph-mon[49602]: from='mgr.14124 192.168.123.101:0/2521618598' entity='mgr.a' 2026-03-07T10:14:20.594 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:20 vm01 ceph-mon[49602]: Added host vm01 2026-03-07T10:14:20.594 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:20 vm01 ceph-mon[49602]: from='mgr.14124 192.168.123.101:0/2521618598' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:14:20.594 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:20 vm01 ceph-mon[49602]: from='mgr.14124 192.168.123.101:0/2521618598' entity='mgr.a' 2026-03-07T10:14:20.594 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:20 vm01 ceph-mon[49602]: from='mgr.14124 192.168.123.101:0/2521618598' entity='mgr.a' 2026-03-07T10:14:21.115 INFO:teuthology.orchestra.run.vm01.stdout:Enabling the dashboard module... 
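The mon and mgr specs are applied with unmanaged set, meaning cephadm records the service spec but will not add or remove those daemons on its own (the test adds hosts and daemons explicitly later). A CLI equivalent of the two "orch apply" dispatches that follow; the harness sends the request through the mon rather than invoking these commands literally:

    ceph orch apply mon --unmanaged    # keep the bootstrap mon, no automatic placement
    ceph orch apply mgr --unmanaged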
2026-03-07T10:14:21.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:21 vm01 ceph-mon[49602]: from='client.14146 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mon", "unmanaged": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:14:21.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:21 vm01 ceph-mon[49602]: Saving service mon spec with placement count:5 2026-03-07T10:14:21.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:21 vm01 ceph-mon[49602]: from='client.14148 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mgr", "unmanaged": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:14:21.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:21 vm01 ceph-mon[49602]: Saving service mgr spec with placement count:2 2026-03-07T10:14:21.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:21 vm01 ceph-mon[49602]: from='mgr.14124 192.168.123.101:0/2521618598' entity='mgr.a' 2026-03-07T10:14:21.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:21 vm01 ceph-mon[49602]: from='client.? 192.168.123.101:0/706353935' entity='client.admin' 2026-03-07T10:14:21.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:21 vm01 ceph-mon[49602]: from='mgr.14124 192.168.123.101:0/2521618598' entity='mgr.a' 2026-03-07T10:14:21.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:21 vm01 ceph-mon[49602]: from='client.? 192.168.123.101:0/933739697' entity='client.admin' 2026-03-07T10:14:21.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:21 vm01 ceph-mon[49602]: from='client.? 192.168.123.101:0/2630861903' entity='client.admin' cmd=[{"prefix": "mgr module enable", "module": "dashboard"}]: dispatch 2026-03-07T10:14:21.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:21 vm01 ceph-mon[49602]: from='mgr.14124 192.168.123.101:0/2521618598' entity='mgr.a' 2026-03-07T10:14:21.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:21 vm01 ceph-mon[49602]: from='mgr.14124 192.168.123.101:0/2521618598' entity='mgr.a' 2026-03-07T10:14:21.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:21 vm01 ceph-mon[49602]: from='mgr.14124 192.168.123.101:0/2521618598' entity='mgr.a' cmd=[{"prefix": "config rm", "who": "osd/host:vm01", "name": "osd_memory_target"}]: dispatch 2026-03-07T10:14:21.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:21 vm01 ceph-mon[49602]: from='mgr.14124 192.168.123.101:0/2521618598' entity='mgr.a' 2026-03-07T10:14:21.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:21 vm01 ceph-mon[49602]: from='mgr.14124 192.168.123.101:0/2521618598' entity='mgr.a' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm01", "caps": []}]: dispatch 2026-03-07T10:14:21.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:21 vm01 ceph-mon[49602]: from='mgr.14124 192.168.123.101:0/2521618598' entity='mgr.a' cmd='[{"prefix": "auth get-or-create", "entity": "client.agent.vm01", "caps": []}]': finished 2026-03-07T10:14:22.392 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:14:22 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: ignoring --setuser ceph since I am not root 2026-03-07T10:14:22.393 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:14:22 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: ignoring --setgroup ceph since I am not root 2026-03-07T10:14:22.567 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout { 2026-03-07T10:14:22.567 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "epoch": 9, 2026-03-07T10:14:22.567 
INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "available": true, 2026-03-07T10:14:22.567 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "active_name": "a", 2026-03-07T10:14:22.567 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "num_standby": 0 2026-03-07T10:14:22.567 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout } 2026-03-07T10:14:22.567 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for the mgr to restart... 2026-03-07T10:14:22.567 INFO:teuthology.orchestra.run.vm01.stdout:Waiting for mgr epoch 9... 2026-03-07T10:14:22.648 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:14:22 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: 2026-03-07T10:14:22.390+0000 7fe898f12100 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member 2026-03-07T10:14:22.648 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:14:22 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: 2026-03-07T10:14:22.526+0000 7fe898f12100 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member 2026-03-07T10:14:23.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:23 vm01 ceph-mon[49602]: from='client.? 192.168.123.101:0/2630861903' entity='client.admin' cmd='[{"prefix": "mgr module enable", "module": "dashboard"}]': finished 2026-03-07T10:14:23.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:23 vm01 ceph-mon[49602]: mgrmap e9: a(active, since 8s) 2026-03-07T10:14:23.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:23 vm01 ceph-mon[49602]: from='client.? 192.168.123.101:0/43663662' entity='client.admin' cmd=[{"prefix": "mgr stat"}]: dispatch 2026-03-07T10:14:23.974 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:14:23 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: 2026-03-07T10:14:23.657+0000 7fe898f12100 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-07T10:14:24.857 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:14:24 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: 2026-03-07T10:14:24.496+0000 7fe898f12100 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member 2026-03-07T10:14:24.857 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:14:24 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: 2026-03-07T10:14:24.616+0000 7fe898f12100 -1 mgr[py] Module influx has missing NOTIFY_TYPES member 2026-03-07T10:14:25.224 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:14:24 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: 2026-03-07T10:14:24.856+0000 7fe898f12100 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member 2026-03-07T10:14:26.974 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:14:26 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: 2026-03-07T10:14:26.674+0000 7fe898f12100 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-07T10:14:27.261 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:14:27 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: 2026-03-07T10:14:27.002+0000 7fe898f12100 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-07T10:14:27.261 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:14:27 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: 2026-03-07T10:14:27.136+0000 7fe898f12100 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 2026-03-07T10:14:27.517 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:14:27 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: 2026-03-07T10:14:27.260+0000 7fe898f12100 -1 mgr[py] Module osd_support has missing 
NOTIFY_TYPES member 2026-03-07T10:14:27.517 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:14:27 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: 2026-03-07T10:14:27.398+0000 7fe898f12100 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 2026-03-07T10:14:27.974 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:14:27 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: 2026-03-07T10:14:27.517+0000 7fe898f12100 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-07T10:14:28.474 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:14:28 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: 2026-03-07T10:14:28.033+0000 7fe898f12100 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member 2026-03-07T10:14:28.474 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:14:28 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: 2026-03-07T10:14:28.183+0000 7fe898f12100 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-07T10:14:29.224 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:14:28 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: 2026-03-07T10:14:28.898+0000 7fe898f12100 -1 mgr[py] Module rgw has missing NOTIFY_TYPES member 2026-03-07T10:14:30.224 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:14:29 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: 2026-03-07T10:14:29.914+0000 7fe898f12100 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-07T10:14:30.224 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:14:30 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: 2026-03-07T10:14:30.038+0000 7fe898f12100 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member 2026-03-07T10:14:30.224 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:14:30 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: 2026-03-07T10:14:30.160+0000 7fe898f12100 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-07T10:14:30.724 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:14:30 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: 2026-03-07T10:14:30.413+0000 7fe898f12100 -1 mgr[py] Module status has missing NOTIFY_TYPES member 2026-03-07T10:14:30.724 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:14:30 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: 2026-03-07T10:14:30.531+0000 7fe898f12100 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 2026-03-07T10:14:31.161 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:14:30 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: 2026-03-07T10:14:30.823+0000 7fe898f12100 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member 2026-03-07T10:14:31.474 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:14:31 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: 2026-03-07T10:14:31.160+0000 7fe898f12100 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member 2026-03-07T10:14:31.798 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:31 vm01 ceph-mon[49602]: Active manager daemon a restarted 2026-03-07T10:14:31.798 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:31 vm01 ceph-mon[49602]: Activating manager daemon a 2026-03-07T10:14:31.798 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:31 vm01 ceph-mon[49602]: osdmap e3: 0 total, 0 up, 0 in 2026-03-07T10:14:31.798 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:31 vm01 ceph-mon[49602]: mgrmap e10: a(active, starting, since 0.0197641s) 2026-03-07T10:14:31.798 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:31 
vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-07T10:14:31.798 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:31 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mgr metadata", "who": "a", "id": "a"}]: dispatch 2026-03-07T10:14:31.798 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:31 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-07T10:14:31.798 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:31 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-07T10:14:31.798 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:31 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-07T10:14:31.798 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:31 vm01 ceph-mon[49602]: Manager daemon a is now available 2026-03-07T10:14:31.798 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:31 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:31.798 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:31 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:14:31.798 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:31 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/a/mirror_snapshot_schedule"}]: dispatch 2026-03-07T10:14:31.798 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:31 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/a/trash_purge_schedule"}]: dispatch 2026-03-07T10:14:31.798 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:14:31 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: 2026-03-07T10:14:31.514+0000 7fe898f12100 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-07T10:14:31.798 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:14:31 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: 2026-03-07T10:14:31.635+0000 7fe898f12100 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-07T10:14:32.704 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout { 2026-03-07T10:14:32.704 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "mgrmap_epoch": 11, 2026-03-07T10:14:32.704 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout "initialized": true 2026-03-07T10:14:32.704 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout } 2026-03-07T10:14:32.704 INFO:teuthology.orchestra.run.vm01.stdout:mgr epoch 9 is available 2026-03-07T10:14:32.704 INFO:teuthology.orchestra.run.vm01.stdout:Generating a dashboard self-signed certificate... 2026-03-07T10:14:33.226 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout Self-signed certificate created 2026-03-07T10:14:33.226 INFO:teuthology.orchestra.run.vm01.stdout:Creating initial admin user... 
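The dashboard bring-up that runs here (module enable, another mgr respawn, self-signed certificate, admin account) can be reproduced by hand with the commands below. This is a sketch: the password file path and DASHBOARD_PASS variable are illustrative, and ac-user-create reads the password from a file passed with -i rather than from the command line:

    ceph mgr module enable dashboard
    ceph dashboard create-self-signed-cert
    printf '%s' "$DASHBOARD_PASS" > /tmp/dashboard-pass         # password file for -i
    ceph dashboard ac-user-create admin -i /tmp/dashboard-pass administrator
    ceph config get mgr mgr/dashboard/ssl_server_port           # 8443 in this run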
2026-03-07T10:14:33.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:33 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:33.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:33 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config rm", "who": "osd/host:vm01", "name": "osd_memory_target"}]: dispatch 2026-03-07T10:14:33.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:33 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:33.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:33 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm01", "caps": []}]: dispatch 2026-03-07T10:14:33.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:33 vm01 ceph-mon[49602]: mgrmap e11: a(active, since 1.02246s) 2026-03-07T10:14:33.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:33 vm01 ceph-mon[49602]: Deploying daemon agent.vm01 on vm01 2026-03-07T10:14:33.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:33 vm01 ceph-mon[49602]: [07/Mar/2026:10:14:33] ENGINE Bus STARTING 2026-03-07T10:14:33.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:33 vm01 ceph-mon[49602]: from='client.14168 -' entity='client.admin' cmd=[{"prefix": "dashboard create-self-signed-cert", "target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:14:33.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:33 vm01 ceph-mon[49602]: [07/Mar/2026:10:14:33] ENGINE Serving on http://192.168.123.101:8765 2026-03-07T10:14:33.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:33 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:33.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:33 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:33.705 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout {"username": "admin", "password": "$2b$12$WXjhp0f5Zhaq.zEw5W4Nq.BmLjmSQR9zlJOvi6tH7Ezw6PXXAGT7i", "roles": ["administrator"], "name": null, "email": null, "lastUpdate": 1772878473, "enabled": true, "pwdExpirationDate": null, "pwdUpdateRequired": true} 2026-03-07T10:14:33.705 INFO:teuthology.orchestra.run.vm01.stdout:Fetching dashboard port number... 2026-03-07T10:14:34.036 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stdout 8443 2026-03-07T10:14:34.036 INFO:teuthology.orchestra.run.vm01.stdout:firewalld does not appear to be present 2026-03-07T10:14:34.036 INFO:teuthology.orchestra.run.vm01.stdout:Not possible to open ports <[8443]>. 
firewalld.service is not available 2026-03-07T10:14:34.037 INFO:teuthology.orchestra.run.vm01.stdout:Ceph Dashboard is now available at: 2026-03-07T10:14:34.037 INFO:teuthology.orchestra.run.vm01.stdout: 2026-03-07T10:14:34.038 INFO:teuthology.orchestra.run.vm01.stdout: URL: https://vm01.local:8443/ 2026-03-07T10:14:34.038 INFO:teuthology.orchestra.run.vm01.stdout: User: admin 2026-03-07T10:14:34.038 INFO:teuthology.orchestra.run.vm01.stdout: Password: nfeh7t8690 2026-03-07T10:14:34.038 INFO:teuthology.orchestra.run.vm01.stdout: 2026-03-07T10:14:34.038 INFO:teuthology.orchestra.run.vm01.stdout:Saving cluster configuration to /var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/config directory 2026-03-07T10:14:34.428 INFO:teuthology.orchestra.run.vm01.stdout:/usr/bin/ceph: stderr set mgr/dashboard/cluster/status 2026-03-07T10:14:34.428 INFO:teuthology.orchestra.run.vm01.stdout:You can access the Ceph CLI as following in case of multi-cluster or non-default config: 2026-03-07T10:14:34.428 INFO:teuthology.orchestra.run.vm01.stdout: 2026-03-07T10:14:34.428 INFO:teuthology.orchestra.run.vm01.stdout: sudo /home/ubuntu/cephtest/cephadm shell --fsid 3fd6e214-1a0e-11f1-b256-99cfc35f3328 -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring 2026-03-07T10:14:34.428 INFO:teuthology.orchestra.run.vm01.stdout: 2026-03-07T10:14:34.428 INFO:teuthology.orchestra.run.vm01.stdout:Or, if you are only running a single cluster on this host: 2026-03-07T10:14:34.428 INFO:teuthology.orchestra.run.vm01.stdout: 2026-03-07T10:14:34.428 INFO:teuthology.orchestra.run.vm01.stdout: sudo /home/ubuntu/cephtest/cephadm shell 2026-03-07T10:14:34.428 INFO:teuthology.orchestra.run.vm01.stdout: 2026-03-07T10:14:34.428 INFO:teuthology.orchestra.run.vm01.stdout:Please consider enabling telemetry to help improve Ceph: 2026-03-07T10:14:34.429 INFO:teuthology.orchestra.run.vm01.stdout: 2026-03-07T10:14:34.429 INFO:teuthology.orchestra.run.vm01.stdout: ceph telemetry on 2026-03-07T10:14:34.429 INFO:teuthology.orchestra.run.vm01.stdout: 2026-03-07T10:14:34.429 INFO:teuthology.orchestra.run.vm01.stdout:For more information see: 2026-03-07T10:14:34.429 INFO:teuthology.orchestra.run.vm01.stdout: 2026-03-07T10:14:34.429 INFO:teuthology.orchestra.run.vm01.stdout: https://docs.ceph.com/en/latest/mgr/telemetry/ 2026-03-07T10:14:34.429 INFO:teuthology.orchestra.run.vm01.stdout: 2026-03-07T10:14:34.429 INFO:teuthology.orchestra.run.vm01.stdout:Bootstrap complete. 2026-03-07T10:14:34.458 INFO:tasks.cephadm:Fetching config... 2026-03-07T10:14:34.458 DEBUG:teuthology.orchestra.run.vm01:> set -ex 2026-03-07T10:14:34.458 DEBUG:teuthology.orchestra.run.vm01:> dd if=/etc/ceph/ceph.conf of=/dev/stdout 2026-03-07T10:14:34.478 INFO:tasks.cephadm:Fetching client.admin keyring... 2026-03-07T10:14:34.478 DEBUG:teuthology.orchestra.run.vm01:> set -ex 2026-03-07T10:14:34.478 DEBUG:teuthology.orchestra.run.vm01:> dd if=/etc/ceph/ceph.client.admin.keyring of=/dev/stdout 2026-03-07T10:14:34.540 INFO:tasks.cephadm:Fetching mon keyring... 2026-03-07T10:14:34.541 DEBUG:teuthology.orchestra.run.vm01:> set -ex 2026-03-07T10:14:34.541 DEBUG:teuthology.orchestra.run.vm01:> sudo dd if=/var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/mon.a/keyring of=/dev/stdout 2026-03-07T10:14:34.614 INFO:tasks.cephadm:Fetching pub ssh key... 
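Bootstrap is complete and the dashboard URL has been reported. A quick liveness check against that endpoint, assuming curl is present on the node; -k is needed because the certificate is the self-signed one generated a few steps earlier:

    # expect an HTTP status code back from the dashboard frontend
    curl -ks -o /dev/null -w '%{http_code}\n' https://vm01.local:8443/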
2026-03-07T10:14:34.615 DEBUG:teuthology.orchestra.run.vm01:> set -ex 2026-03-07T10:14:34.615 DEBUG:teuthology.orchestra.run.vm01:> dd if=/home/ubuntu/cephtest/ceph.pub of=/dev/stdout 2026-03-07T10:14:34.675 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:34 vm01 ceph-mon[49602]: [07/Mar/2026:10:14:33] ENGINE Serving on https://192.168.123.101:7150 2026-03-07T10:14:34.675 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:34 vm01 ceph-mon[49602]: [07/Mar/2026:10:14:33] ENGINE Bus STARTED 2026-03-07T10:14:34.676 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:34 vm01 ceph-mon[49602]: [07/Mar/2026:10:14:33] ENGINE Client ('192.168.123.101', 49712) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-07T10:14:34.676 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:34 vm01 ceph-mon[49602]: from='client.14170 -' entity='client.admin' cmd=[{"prefix": "dashboard ac-user-create", "username": "admin", "rolename": "administrator", "force_password": true, "pwd_update_required": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:14:34.676 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:34 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:34.676 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:34 vm01 ceph-mon[49602]: from='client.? 192.168.123.101:0/2795827174' entity='client.admin' cmd=[{"prefix": "config get", "who": "mgr", "key": "mgr/dashboard/ssl_server_port"}]: dispatch 2026-03-07T10:14:34.676 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:34 vm01 ceph-mon[49602]: mgrmap e12: a(active, since 2s) 2026-03-07T10:14:34.676 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:34 vm01 ceph-mon[49602]: from='client.? 192.168.123.101:0/985324366' entity='client.admin' 2026-03-07T10:14:34.685 INFO:tasks.cephadm:Installing pub ssh key for root users... 
2026-03-07T10:14:34.685 DEBUG:teuthology.orchestra.run.vm01:> sudo install -d -m 0700 /root/.ssh && echo 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDPwhclgAgrEy1qfr7KJYb742/VXFjbVr6dbSE2QSv0wFUFJg80QgKuLpf/fOMal8yg8LDh31g8oEUMoMr1Dccx+TxDWe00lZcyuD32KVzDIPpy5njNujauXPjS8wsMTVqDjn/obcifQBwPVTCsX45GdVymGbkSe9YajUZm72Rj30QnFifGVYEIrmZnEcM5//KDPtCpml3t0uklLoW1z6CcHgcFfWBM4+ZUss/wNsrshYCXiZ1hVHDopgtgk7XwLomjZhLUR5ZgMsOlhugL6iygjyk4AdQ09qOjUdcFumXnA3q4silqauz0fS1shqipKd1xKwd7e/nvzW4v/VsVvzaSnBPKBj5P7yH8XFo3XJT7ELm7arL9pvFBAJeqJyNHsOKwsrHdUsN3CqKI3F3TepdTrahc8yG2hUXK0iqcHkwgvyVToMlWeRFv3erxRqBc/RRGOiA9YA4ofSItfVH5B3tKA2ftv2LQbnV0js+KHAryDOFN01Jh0bqJM+dHExHyxJ8= ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328' | sudo tee -a /root/.ssh/authorized_keys && sudo chmod 0600 /root/.ssh/authorized_keys 2026-03-07T10:14:34.797 INFO:teuthology.orchestra.run.vm01.stdout:ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDPwhclgAgrEy1qfr7KJYb742/VXFjbVr6dbSE2QSv0wFUFJg80QgKuLpf/fOMal8yg8LDh31g8oEUMoMr1Dccx+TxDWe00lZcyuD32KVzDIPpy5njNujauXPjS8wsMTVqDjn/obcifQBwPVTCsX45GdVymGbkSe9YajUZm72Rj30QnFifGVYEIrmZnEcM5//KDPtCpml3t0uklLoW1z6CcHgcFfWBM4+ZUss/wNsrshYCXiZ1hVHDopgtgk7XwLomjZhLUR5ZgMsOlhugL6iygjyk4AdQ09qOjUdcFumXnA3q4silqauz0fS1shqipKd1xKwd7e/nvzW4v/VsVvzaSnBPKBj5P7yH8XFo3XJT7ELm7arL9pvFBAJeqJyNHsOKwsrHdUsN3CqKI3F3TepdTrahc8yG2hUXK0iqcHkwgvyVToMlWeRFv3erxRqBc/RRGOiA9YA4ofSItfVH5B3tKA2ftv2LQbnV0js+KHAryDOFN01Jh0bqJM+dHExHyxJ8= ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328 2026-03-07T10:14:34.808 DEBUG:teuthology.orchestra.run.vm04:> sudo install -d -m 0700 /root/.ssh && echo 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDPwhclgAgrEy1qfr7KJYb742/VXFjbVr6dbSE2QSv0wFUFJg80QgKuLpf/fOMal8yg8LDh31g8oEUMoMr1Dccx+TxDWe00lZcyuD32KVzDIPpy5njNujauXPjS8wsMTVqDjn/obcifQBwPVTCsX45GdVymGbkSe9YajUZm72Rj30QnFifGVYEIrmZnEcM5//KDPtCpml3t0uklLoW1z6CcHgcFfWBM4+ZUss/wNsrshYCXiZ1hVHDopgtgk7XwLomjZhLUR5ZgMsOlhugL6iygjyk4AdQ09qOjUdcFumXnA3q4silqauz0fS1shqipKd1xKwd7e/nvzW4v/VsVvzaSnBPKBj5P7yH8XFo3XJT7ELm7arL9pvFBAJeqJyNHsOKwsrHdUsN3CqKI3F3TepdTrahc8yG2hUXK0iqcHkwgvyVToMlWeRFv3erxRqBc/RRGOiA9YA4ofSItfVH5B3tKA2ftv2LQbnV0js+KHAryDOFN01Jh0bqJM+dHExHyxJ8= ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328' | sudo tee -a /root/.ssh/authorized_keys && sudo chmod 0600 /root/.ssh/authorized_keys 2026-03-07T10:14:34.844 INFO:teuthology.orchestra.run.vm04.stdout:ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDPwhclgAgrEy1qfr7KJYb742/VXFjbVr6dbSE2QSv0wFUFJg80QgKuLpf/fOMal8yg8LDh31g8oEUMoMr1Dccx+TxDWe00lZcyuD32KVzDIPpy5njNujauXPjS8wsMTVqDjn/obcifQBwPVTCsX45GdVymGbkSe9YajUZm72Rj30QnFifGVYEIrmZnEcM5//KDPtCpml3t0uklLoW1z6CcHgcFfWBM4+ZUss/wNsrshYCXiZ1hVHDopgtgk7XwLomjZhLUR5ZgMsOlhugL6iygjyk4AdQ09qOjUdcFumXnA3q4silqauz0fS1shqipKd1xKwd7e/nvzW4v/VsVvzaSnBPKBj5P7yH8XFo3XJT7ELm7arL9pvFBAJeqJyNHsOKwsrHdUsN3CqKI3F3TepdTrahc8yG2hUXK0iqcHkwgvyVToMlWeRFv3erxRqBc/RRGOiA9YA4ofSItfVH5B3tKA2ftv2LQbnV0js+KHAryDOFN01Jh0bqJM+dHExHyxJ8= ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328 2026-03-07T10:14:34.854 DEBUG:teuthology.orchestra.run.vm07:> sudo install -d -m 0700 /root/.ssh && echo 'ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABgQDPwhclgAgrEy1qfr7KJYb742/VXFjbVr6dbSE2QSv0wFUFJg80QgKuLpf/fOMal8yg8LDh31g8oEUMoMr1Dccx+TxDWe00lZcyuD32KVzDIPpy5njNujauXPjS8wsMTVqDjn/obcifQBwPVTCsX45GdVymGbkSe9YajUZm72Rj30QnFifGVYEIrmZnEcM5//KDPtCpml3t0uklLoW1z6CcHgcFfWBM4+ZUss/wNsrshYCXiZ1hVHDopgtgk7XwLomjZhLUR5ZgMsOlhugL6iygjyk4AdQ09qOjUdcFumXnA3q4silqauz0fS1shqipKd1xKwd7e/nvzW4v/VsVvzaSnBPKBj5P7yH8XFo3XJT7ELm7arL9pvFBAJeqJyNHsOKwsrHdUsN3CqKI3F3TepdTrahc8yG2hUXK0iqcHkwgvyVToMlWeRFv3erxRqBc/RRGOiA9YA4ofSItfVH5B3tKA2ftv2LQbnV0js+KHAryDOFN01Jh0bqJM+dHExHyxJ8= ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328' | sudo tee -a /root/.ssh/authorized_keys && sudo chmod 0600 /root/.ssh/authorized_keys 2026-03-07T10:14:34.889 INFO:teuthology.orchestra.run.vm07.stdout:ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDPwhclgAgrEy1qfr7KJYb742/VXFjbVr6dbSE2QSv0wFUFJg80QgKuLpf/fOMal8yg8LDh31g8oEUMoMr1Dccx+TxDWe00lZcyuD32KVzDIPpy5njNujauXPjS8wsMTVqDjn/obcifQBwPVTCsX45GdVymGbkSe9YajUZm72Rj30QnFifGVYEIrmZnEcM5//KDPtCpml3t0uklLoW1z6CcHgcFfWBM4+ZUss/wNsrshYCXiZ1hVHDopgtgk7XwLomjZhLUR5ZgMsOlhugL6iygjyk4AdQ09qOjUdcFumXnA3q4silqauz0fS1shqipKd1xKwd7e/nvzW4v/VsVvzaSnBPKBj5P7yH8XFo3XJT7ELm7arL9pvFBAJeqJyNHsOKwsrHdUsN3CqKI3F3TepdTrahc8yG2hUXK0iqcHkwgvyVToMlWeRFv3erxRqBc/RRGOiA9YA4ofSItfVH5B3tKA2ftv2LQbnV0js+KHAryDOFN01Jh0bqJM+dHExHyxJ8= ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328 2026-03-07T10:14:34.900 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3fd6e214-1a0e-11f1-b256-99cfc35f3328 -- ceph config set mgr mgr/cephadm/allow_ptrace true 2026-03-07T10:14:35.175 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/mon.a/config 2026-03-07T10:14:35.726 INFO:tasks.cephadm:Distributing conf and client.admin keyring to all hosts + 0755 2026-03-07T10:14:35.726 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3fd6e214-1a0e-11f1-b256-99cfc35f3328 -- ceph orch client-keyring set client.admin '*' --mode 0755 2026-03-07T10:14:36.001 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/mon.a/config 2026-03-07T10:14:36.437 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:36 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:36.437 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:36 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:36.437 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:36 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:36.437 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:36 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:14:36.437 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:36 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:36.437 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:36 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:36.437 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:36 
vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:36.437 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:36 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:36.437 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:36 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:36.437 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:36 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:14:36.437 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:36 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:36.437 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:36 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:36.437 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:36 vm01 ceph-mon[49602]: from='client.? 192.168.123.101:0/3409756251' entity='client.admin' 2026-03-07T10:14:36.437 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:36 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:14:36.437 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:36 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:36.437 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:36 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:36.437 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:36 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:36.483 INFO:tasks.cephadm:Writing (initial) conf and keyring to vm04 2026-03-07T10:14:36.483 DEBUG:teuthology.orchestra.run.vm04:> set -ex 2026-03-07T10:14:36.483 DEBUG:teuthology.orchestra.run.vm04:> dd of=/etc/ceph/ceph.conf 2026-03-07T10:14:36.498 DEBUG:teuthology.orchestra.run.vm04:> set -ex 2026-03-07T10:14:36.498 DEBUG:teuthology.orchestra.run.vm04:> dd of=/etc/ceph/ceph.client.admin.keyring 2026-03-07T10:14:36.551 INFO:tasks.cephadm:Adding host vm04 to orchestrator... 
2026-03-07T10:14:36.552 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3fd6e214-1a0e-11f1-b256-99cfc35f3328 -- ceph orch host add vm04 2026-03-07T10:14:36.843 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/mon.a/config 2026-03-07T10:14:37.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:37 vm01 ceph-mon[49602]: from='client.14178 -' entity='client.admin' cmd=[{"prefix": "orch client-keyring set", "entity": "client.admin", "placement": "*", "mode": "0755", "target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:14:37.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:37 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:37.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:37 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:14:37.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:37 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:14:37.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:37 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:14:37.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:37 vm01 ceph-mon[49602]: Updating vm01:/etc/ceph/ceph.conf 2026-03-07T10:14:37.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:37 vm01 ceph-mon[49602]: Updating vm01:/var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/config/ceph.conf 2026-03-07T10:14:37.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:37 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:37.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:37 vm01 ceph-mon[49602]: Updating vm01:/etc/ceph/ceph.client.admin.keyring 2026-03-07T10:14:37.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:37 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:37.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:37 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:38.724 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:38 vm01 ceph-mon[49602]: Updating vm01:/var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/config/ceph.client.admin.keyring 2026-03-07T10:14:38.724 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:38 vm01 ceph-mon[49602]: from='client.14180 -' entity='client.admin' cmd=[{"prefix": "orch host add", "hostname": "vm04", "target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:14:38.724 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:38 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:38.724 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:38 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:38.724 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:38 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:38.724 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:38 vm01 
ceph-mon[49602]: Deploying cephadm binary to vm04 2026-03-07T10:14:39.419 INFO:teuthology.orchestra.run.vm01.stdout:Added host 'vm04' with addr '192.168.123.104' 2026-03-07T10:14:39.466 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3fd6e214-1a0e-11f1-b256-99cfc35f3328 -- ceph orch host ls --format=json 2026-03-07T10:14:39.629 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/mon.a/config 2026-03-07T10:14:39.678 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:39 vm01 ceph-mon[49602]: mgrmap e13: a(active, since 6s) 2026-03-07T10:14:39.678 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:39 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:39.678 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:39 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:14:39.938 INFO:teuthology.orchestra.run.vm01.stdout: 2026-03-07T10:14:39.939 INFO:teuthology.orchestra.run.vm01.stdout:[{"addr": "192.168.123.101", "hostname": "vm01", "labels": [], "status": ""}, {"addr": "192.168.123.104", "hostname": "vm04", "labels": [], "status": ""}] 2026-03-07T10:14:40.012 INFO:tasks.cephadm:Writing (initial) conf and keyring to vm07 2026-03-07T10:14:40.013 DEBUG:teuthology.orchestra.run.vm07:> set -ex 2026-03-07T10:14:40.013 DEBUG:teuthology.orchestra.run.vm07:> dd of=/etc/ceph/ceph.conf 2026-03-07T10:14:40.027 DEBUG:teuthology.orchestra.run.vm07:> set -ex 2026-03-07T10:14:40.027 DEBUG:teuthology.orchestra.run.vm07:> dd of=/etc/ceph/ceph.client.admin.keyring 2026-03-07T10:14:40.082 INFO:tasks.cephadm:Adding host vm07 to orchestrator... 
2026-03-07T10:14:40.082 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3fd6e214-1a0e-11f1-b256-99cfc35f3328 -- ceph orch host add vm07 2026-03-07T10:14:40.260 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/mon.a/config 2026-03-07T10:14:40.974 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:40 vm01 ceph-mon[49602]: Added host vm04 2026-03-07T10:14:40.974 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:40 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:40.974 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:40 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:40.974 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:40 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:40.974 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:40 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config rm", "who": "osd/host:vm04", "name": "osd_memory_target"}]: dispatch 2026-03-07T10:14:40.974 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:40 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:14:40.974 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:40 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:14:40.974 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:40 vm01 ceph-mon[49602]: Updating vm04:/etc/ceph/ceph.conf 2026-03-07T10:14:40.974 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:40 vm01 ceph-mon[49602]: Updating vm04:/var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/config/ceph.conf 2026-03-07T10:14:40.974 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:40 vm01 ceph-mon[49602]: from='client.14182 -' entity='client.admin' cmd=[{"prefix": "orch host ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-07T10:14:40.974 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:40 vm01 ceph-mon[49602]: Updating vm04:/etc/ceph/ceph.client.admin.keyring 2026-03-07T10:14:40.974 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:40 vm01 ceph-mon[49602]: Updating vm04:/var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/config/ceph.client.admin.keyring 2026-03-07T10:14:40.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:40 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:40.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:40 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:40.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:40 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm04", "caps": []}]: dispatch 2026-03-07T10:14:40.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:40 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd='[{"prefix": "auth get-or-create", "entity": "client.agent.vm04", "caps": []}]': finished 2026-03-07T10:14:42.224 
INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:41 vm01 ceph-mon[49602]: Deploying daemon agent.vm04 on vm04 2026-03-07T10:14:42.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:41 vm01 ceph-mon[49602]: from='client.14184 -' entity='client.admin' cmd=[{"prefix": "orch host add", "hostname": "vm07", "target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:14:42.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:41 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:42.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:41 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:42.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:41 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:42.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:41 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:14:42.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:41 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:14:42.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:41 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:14:42.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:41 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:42.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:41 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:14:42.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:41 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:14:42.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:41 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:14:42.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:41 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:42.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:41 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:42.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:41 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:42.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:41 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:42.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:41 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:42.803 INFO:teuthology.orchestra.run.vm01.stdout:Added host 'vm07' with addr '192.168.123.107' 2026-03-07T10:14:42.853 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3fd6e214-1a0e-11f1-b256-99cfc35f3328 
-- ceph orch host ls --format=json 2026-03-07T10:14:43.014 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/mon.a/config 2026-03-07T10:14:43.056 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:42 vm01 ceph-mon[49602]: Deploying cephadm binary to vm07 2026-03-07T10:14:43.056 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:42 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:43.056 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:42 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:43.056 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:42 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:43.057 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:42 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:14:43.315 INFO:teuthology.orchestra.run.vm01.stdout: 2026-03-07T10:14:43.315 INFO:teuthology.orchestra.run.vm01.stdout:[{"addr": "192.168.123.101", "hostname": "vm01", "labels": [], "status": ""}, {"addr": "192.168.123.104", "hostname": "vm04", "labels": [], "status": ""}, {"addr": "192.168.123.107", "hostname": "vm07", "labels": [], "status": ""}] 2026-03-07T10:14:43.370 INFO:tasks.cephadm:Setting crush tunables to default 2026-03-07T10:14:43.370 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3fd6e214-1a0e-11f1-b256-99cfc35f3328 -- ceph osd crush tunables default 2026-03-07T10:14:43.539 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/mon.a/config 2026-03-07T10:14:44.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:43 vm01 ceph-mon[49602]: Added host vm07 2026-03-07T10:14:44.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:43 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:44.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:43 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:44.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:43 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:44.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:43 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config rm", "who": "osd/host:vm07", "name": "osd_memory_target"}]: dispatch 2026-03-07T10:14:44.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:43 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:14:44.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:43 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:14:44.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:43 vm01 ceph-mon[49602]: Updating vm07:/etc/ceph/ceph.conf 2026-03-07T10:14:44.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:43 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 
2026-03-07T10:14:44.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:43 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:44.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:43 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm07", "caps": []}]: dispatch 2026-03-07T10:14:44.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:43 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd='[{"prefix": "auth get-or-create", "entity": "client.agent.vm07", "caps": []}]': finished 2026-03-07T10:14:44.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:43 vm01 ceph-mon[49602]: from='client.? 192.168.123.101:0/3833318551' entity='client.admin' cmd=[{"prefix": "osd crush tunables", "profile": "default"}]: dispatch 2026-03-07T10:14:44.569 INFO:teuthology.orchestra.run.vm01.stderr:adjusted tunables profile to default 2026-03-07T10:14:44.618 INFO:tasks.cephadm:Adding mon.a on vm01 2026-03-07T10:14:44.618 INFO:tasks.cephadm:Adding mon.b on vm04 2026-03-07T10:14:44.618 INFO:tasks.cephadm:Adding mon.c on vm07 2026-03-07T10:14:44.618 DEBUG:teuthology.orchestra.run.vm07:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3fd6e214-1a0e-11f1-b256-99cfc35f3328 -- ceph orch apply mon '3;vm01:192.168.123.101=a;vm04:192.168.123.104=b;vm07:192.168.123.107=c' 2026-03-07T10:14:44.885 INFO:teuthology.orchestra.run.vm07.stderr:Inferring config /var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/config/ceph.conf 2026-03-07T10:14:45.292 INFO:teuthology.orchestra.run.vm07.stdout:Scheduled mon update... 2026-03-07T10:14:45.355 DEBUG:teuthology.orchestra.run.vm04:mon.b> sudo journalctl -f -n 0 -u ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328@mon.b.service 2026-03-07T10:14:45.357 DEBUG:teuthology.orchestra.run.vm07:mon.c> sudo journalctl -f -n 0 -u ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328@mon.c.service 2026-03-07T10:14:45.359 INFO:tasks.cephadm:Waiting for 3 mons in monmap... 2026-03-07T10:14:45.359 DEBUG:teuthology.orchestra.run.vm07:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3fd6e214-1a0e-11f1-b256-99cfc35f3328 -- ceph mon dump -f json 2026-03-07T10:14:45.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:44 vm01 ceph-mon[49602]: Updating vm07:/var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/config/ceph.conf 2026-03-07T10:14:45.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:44 vm01 ceph-mon[49602]: from='client.14186 -' entity='client.admin' cmd=[{"prefix": "orch host ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-07T10:14:45.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:44 vm01 ceph-mon[49602]: Updating vm07:/etc/ceph/ceph.client.admin.keyring 2026-03-07T10:14:45.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:44 vm01 ceph-mon[49602]: Updating vm07:/var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/config/ceph.client.admin.keyring 2026-03-07T10:14:45.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:44 vm01 ceph-mon[49602]: from='client.? 
192.168.123.101:0/3833318551' entity='client.admin' cmd='[{"prefix": "osd crush tunables", "profile": "default"}]': finished 2026-03-07T10:14:45.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:44 vm01 ceph-mon[49602]: osdmap e4: 0 total, 0 up, 0 in 2026-03-07T10:14:45.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:44 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:45.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:44 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:45.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:44 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:45.588 INFO:teuthology.orchestra.run.vm07.stderr:Inferring config /var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/config/ceph.conf 2026-03-07T10:14:46.031 INFO:teuthology.orchestra.run.vm07.stdout: 2026-03-07T10:14:46.031 INFO:teuthology.orchestra.run.vm07.stdout:{"epoch":1,"fsid":"3fd6e214-1a0e-11f1-b256-99cfc35f3328","modified":"2026-03-07T10:13:48.241132Z","created":"2026-03-07T10:13:48.241132Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"a","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:3300","nonce":0},{"type":"v1","addr":"192.168.123.101:6789","nonce":0}]},"addr":"192.168.123.101:6789/0","public_addr":"192.168.123.101:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-07T10:14:46.032 INFO:teuthology.orchestra.run.vm07.stderr:dumped monmap epoch 1 2026-03-07T10:14:46.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:45 vm01 ceph-mon[49602]: Deploying daemon agent.vm07 on vm07 2026-03-07T10:14:46.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:45 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:14:46.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:45 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:14:46.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:45 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:14:46.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:45 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:46.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:45 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:14:46.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:45 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:14:46.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:45 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:14:46.475 
INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:45 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:46.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:45 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:46.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:45 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:14:46.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:45 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:14:46.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:45 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:14:46.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:45 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:46.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:45 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-07T10:14:46.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:45 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:14:46.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:45 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:46.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:45 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:46.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:45 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:46.702 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: mon.b@-1(???) e0 preinit fsid 3fd6e214-1a0e-11f1-b256-99cfc35f3328 2026-03-07T10:14:47.087 INFO:tasks.cephadm:Waiting for 3 mons in monmap... 
2026-03-07T10:14:47.087 DEBUG:teuthology.orchestra.run.vm07:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3fd6e214-1a0e-11f1-b256-99cfc35f3328 -- ceph mon dump -f json 2026-03-07T10:14:47.102 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: mon.b@-1(synchronizing).mds e1 new map 2026-03-07T10:14:47.102 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: mon.b@-1(synchronizing).mds e1 print_map 2026-03-07T10:14:47.102 INFO:journalctl@ceph.mon.b.vm04.stdout: e1 2026-03-07T10:14:47.102 INFO:journalctl@ceph.mon.b.vm04.stdout: btime 2026-03-07T10:13:49:200453+0000 2026-03-07T10:14:47.102 INFO:journalctl@ceph.mon.b.vm04.stdout: enable_multiple, ever_enabled_multiple: 1,1 2026-03-07T10:14:47.102 INFO:journalctl@ceph.mon.b.vm04.stdout: default compat: compat={},rocompat={},incompat={1=base v0.20,2=client writeable ranges,3=default file layouts on dirs,4=dir inode in separate object,5=mds uses versioned encoding,6=dirfrag is stored in omap,8=no anchor table,9=file layout v2,10=snaprealm v2,11=minor log segments,12=quiesce subvolumes} 2026-03-07T10:14:47.102 INFO:journalctl@ceph.mon.b.vm04.stdout: legacy client fscid: -1 2026-03-07T10:14:47.102 INFO:journalctl@ceph.mon.b.vm04.stdout: 2026-03-07T10:14:47.102 INFO:journalctl@ceph.mon.b.vm04.stdout: No filesystems configured 2026-03-07T10:14:47.102 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: mon.b@-1(synchronizing).osd e0 _set_cache_ratios kv ratio 0.25 inc ratio 0.375 full ratio 0.375 2026-03-07T10:14:47.102 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: mon.b@-1(synchronizing).osd e0 register_cache_with_pcm pcm target: 2147483648 pcm max: 1020054732 pcm min: 134217728 inc_osd_cache size: 1 2026-03-07T10:14:47.102 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: mon.b@-1(synchronizing).osd e1 e1: 0 total, 0 up, 0 in 2026-03-07T10:14:47.102 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: mon.b@-1(synchronizing).osd e2 e2: 0 total, 0 up, 0 in 2026-03-07T10:14:47.102 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: mon.b@-1(synchronizing).osd e3 e3: 0 total, 0 up, 0 in 2026-03-07T10:14:47.102 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: mon.b@-1(synchronizing).osd e4 e4: 0 total, 0 up, 0 in 2026-03-07T10:14:47.102 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: mon.b@-1(synchronizing).osd e4 crush map has features 3314932999778484224, adjusting msgr requires 2026-03-07T10:14:47.102 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: mon.b@-1(synchronizing).osd e4 crush map has features 288514050185494528, adjusting msgr requires 2026-03-07T10:14:47.102 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: mon.b@-1(synchronizing).osd e4 crush map has features 288514050185494528, adjusting msgr requires 2026-03-07T10:14:47.102 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: mon.b@-1(synchronizing).osd e4 crush map has features 288514050185494528, adjusting msgr requires 2026-03-07T10:14:47.102 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: mkfs 3fd6e214-1a0e-11f1-b256-99cfc35f3328 2026-03-07T10:14:47.102 
INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: mon.a is new leader, mons a in quorum (ranks 0) 2026-03-07T10:14:47.102 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: mon.a is new leader, mons a in quorum (ranks 0) 2026-03-07T10:14:47.102 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: monmap epoch 1 2026-03-07T10:14:47.102 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: fsid 3fd6e214-1a0e-11f1-b256-99cfc35f3328 2026-03-07T10:14:47.102 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: last_changed 2026-03-07T10:13:48.241132+0000 2026-03-07T10:14:47.102 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: created 2026-03-07T10:13:48.241132+0000 2026-03-07T10:14:47.102 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: min_mon_release 19 (squid) 2026-03-07T10:14:47.102 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: election_strategy: 1 2026-03-07T10:14:47.102 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: 0: [v2:192.168.123.101:3300/0,v1:192.168.123.101:6789/0] mon.a 2026-03-07T10:14:47.102 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: fsmap 2026-03-07T10:14:47.102 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: osdmap e1: 0 total, 0 up, 0 in 2026-03-07T10:14:47.102 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: mgrmap e1: no daemons active 2026-03-07T10:14:47.102 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='client.? 192.168.123.101:0/2682140876' entity='client.admin' 2026-03-07T10:14:47.102 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='client.? 192.168.123.101:0/3223506923' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch 2026-03-07T10:14:47.102 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='client.? 192.168.123.101:0/4050763850' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch 2026-03-07T10:14:47.102 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='client.? 192.168.123.101:0/1167621262' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch 2026-03-07T10:14:47.102 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='client.? 192.168.123.101:0/1812127146' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch 2026-03-07T10:14:47.102 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='client.? 
192.168.123.101:0/1825768623' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch 2026-03-07T10:14:47.102 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: Activating manager daemon a 2026-03-07T10:14:47.102 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: mgrmap e2: a(active, starting, since 0.00399133s) 2026-03-07T10:14:47.102 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14100 192.168.123.101:0/3671220991' entity='mgr.a' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-07T10:14:47.102 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14100 192.168.123.101:0/3671220991' entity='mgr.a' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-07T10:14:47.102 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14100 192.168.123.101:0/3671220991' entity='mgr.a' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-07T10:14:47.102 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14100 192.168.123.101:0/3671220991' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-07T10:14:47.102 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14100 192.168.123.101:0/3671220991' entity='mgr.a' cmd=[{"prefix": "mgr metadata", "who": "a", "id": "a"}]: dispatch 2026-03-07T10:14:47.102 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: Manager daemon a is now available 2026-03-07T10:14:47.102 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14100 192.168.123.101:0/3671220991' entity='mgr.a' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/a/mirror_snapshot_schedule"}]: dispatch 2026-03-07T10:14:47.102 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14100 192.168.123.101:0/3671220991' entity='mgr.a' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/a/trash_purge_schedule"}]: dispatch 2026-03-07T10:14:47.102 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14100 192.168.123.101:0/3671220991' entity='mgr.a' 2026-03-07T10:14:47.103 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14100 192.168.123.101:0/3671220991' entity='mgr.a' 2026-03-07T10:14:47.103 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14100 192.168.123.101:0/3671220991' entity='mgr.a' 2026-03-07T10:14:47.103 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: mgrmap e3: a(active, since 1.01035s) 2026-03-07T10:14:47.103 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: mgrmap e4: a(active, since 2s) 2026-03-07T10:14:47.103 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='client.? 192.168.123.101:0/3072353561' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch 2026-03-07T10:14:47.103 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='client.? 192.168.123.101:0/527461182' entity='client.admin' cmd=[{"prefix": "config assimilate-conf"}]: dispatch 2026-03-07T10:14:47.103 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='client.? 
192.168.123.101:0/527461182' entity='client.admin' cmd='[{"prefix": "config assimilate-conf"}]': finished 2026-03-07T10:14:47.103 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='client.? 192.168.123.101:0/2063341138' entity='client.admin' cmd=[{"prefix": "mgr module enable", "module": "cephadm"}]: dispatch 2026-03-07T10:14:47.103 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='client.? 192.168.123.101:0/2063341138' entity='client.admin' cmd='[{"prefix": "mgr module enable", "module": "cephadm"}]': finished 2026-03-07T10:14:47.103 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: mgrmap e5: a(active, since 3s) 2026-03-07T10:14:47.103 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='client.? 192.168.123.101:0/615134430' entity='client.admin' cmd=[{"prefix": "mgr stat"}]: dispatch 2026-03-07T10:14:47.103 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: Active manager daemon a restarted 2026-03-07T10:14:47.103 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: Activating manager daemon a 2026-03-07T10:14:47.103 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: osdmap e2: 0 total, 0 up, 0 in 2026-03-07T10:14:47.103 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: mgrmap e6: a(active, starting, since 0.0215224s) 2026-03-07T10:14:47.103 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14124 192.168.123.101:0/2521618598' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-07T10:14:47.103 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14124 192.168.123.101:0/2521618598' entity='mgr.a' cmd=[{"prefix": "mgr metadata", "who": "a", "id": "a"}]: dispatch 2026-03-07T10:14:47.103 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14124 192.168.123.101:0/2521618598' entity='mgr.a' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-07T10:14:47.103 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14124 192.168.123.101:0/2521618598' entity='mgr.a' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-07T10:14:47.103 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14124 192.168.123.101:0/2521618598' entity='mgr.a' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-07T10:14:47.103 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: Manager daemon a is now available 2026-03-07T10:14:47.103 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14124 192.168.123.101:0/2521618598' entity='mgr.a' 2026-03-07T10:14:47.103 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14124 192.168.123.101:0/2521618598' entity='mgr.a' 2026-03-07T10:14:47.103 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14124 192.168.123.101:0/2521618598' entity='mgr.a' 2026-03-07T10:14:47.103 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14124 192.168.123.101:0/2521618598' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:14:47.103 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14124 192.168.123.101:0/2521618598' entity='mgr.a' cmd=[{"prefix":"config 
rm","who":"mgr","name":"mgr/rbd_support/a/mirror_snapshot_schedule"}]: dispatch 2026-03-07T10:14:47.103 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14124 192.168.123.101:0/2521618598' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:14:47.103 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14124 192.168.123.101:0/2521618598' entity='mgr.a' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/a/trash_purge_schedule"}]: dispatch 2026-03-07T10:14:47.103 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: Found migration_current of "None". Setting to last migration. 2026-03-07T10:14:47.103 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14124 192.168.123.101:0/2521618598' entity='mgr.a' 2026-03-07T10:14:47.103 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14124 192.168.123.101:0/2521618598' entity='mgr.a' 2026-03-07T10:14:47.103 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: mgrmap e7: a(active, since 1.03013s) 2026-03-07T10:14:47.103 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14124 192.168.123.101:0/2521618598' entity='mgr.a' 2026-03-07T10:14:47.103 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14124 192.168.123.101:0/2521618598' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:14:47.103 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='client.14128 -' entity='client.admin' cmd=[{"prefix": "get_command_descriptions"}]: dispatch 2026-03-07T10:14:47.103 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='client.14128 -' entity='client.admin' cmd=[{"prefix": "mgr_status"}]: dispatch 2026-03-07T10:14:47.103 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='client.14136 -' entity='client.admin' cmd=[{"prefix": "orch set backend", "module_name": "cephadm", "target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:14:47.103 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: [07/Mar/2026:10:14:15] ENGINE Bus STARTING 2026-03-07T10:14:47.103 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='client.14138 -' entity='client.admin' cmd=[{"prefix": "cephadm set-user", "user": "root", "target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:14:47.103 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14124 192.168.123.101:0/2521618598' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:14:47.103 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14124 192.168.123.101:0/2521618598' entity='mgr.a' 2026-03-07T10:14:47.103 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14124 192.168.123.101:0/2521618598' entity='mgr.a' 2026-03-07T10:14:47.103 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: [07/Mar/2026:10:14:15] ENGINE Serving on https://192.168.123.101:7150 2026-03-07T10:14:47.103 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: [07/Mar/2026:10:14:15] ENGINE Client ('192.168.123.101', 45768) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection 
has been closed (EOF) (_ssl.c:1147)') 2026-03-07T10:14:47.103 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: [07/Mar/2026:10:14:15] ENGINE Serving on http://192.168.123.101:8765 2026-03-07T10:14:47.103 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: [07/Mar/2026:10:14:15] ENGINE Bus STARTED 2026-03-07T10:14:47.103 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='client.14140 -' entity='client.admin' cmd=[{"prefix": "cephadm generate-key", "target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:14:47.103 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: Generating ssh key... 2026-03-07T10:14:47.103 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: mgrmap e8: a(active, since 2s) 2026-03-07T10:14:47.103 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='client.14142 -' entity='client.admin' cmd=[{"prefix": "cephadm get-pub-key", "target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:14:47.103 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='client.14144 -' entity='client.admin' cmd=[{"prefix": "orch host add", "hostname": "vm01", "addr": "192.168.123.101", "target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:14:47.103 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: Deploying cephadm binary to vm01 2026-03-07T10:14:47.103 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14124 192.168.123.101:0/2521618598' entity='mgr.a' 2026-03-07T10:14:47.103 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: Added host vm01 2026-03-07T10:14:47.103 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14124 192.168.123.101:0/2521618598' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:14:47.103 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14124 192.168.123.101:0/2521618598' entity='mgr.a' 2026-03-07T10:14:47.103 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14124 192.168.123.101:0/2521618598' entity='mgr.a' 2026-03-07T10:14:47.103 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='client.14146 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mon", "unmanaged": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:14:47.103 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: Saving service mon spec with placement count:5 2026-03-07T10:14:47.103 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='client.14148 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mgr", "unmanaged": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:14:47.103 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: Saving service mgr spec with placement count:2 2026-03-07T10:14:47.103 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14124 192.168.123.101:0/2521618598' entity='mgr.a' 2026-03-07T10:14:47.103 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='client.? 
192.168.123.101:0/706353935' entity='client.admin' 2026-03-07T10:14:47.103 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14124 192.168.123.101:0/2521618598' entity='mgr.a' 2026-03-07T10:14:47.103 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='client.? 192.168.123.101:0/933739697' entity='client.admin' 2026-03-07T10:14:47.103 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='client.? 192.168.123.101:0/2630861903' entity='client.admin' cmd=[{"prefix": "mgr module enable", "module": "dashboard"}]: dispatch 2026-03-07T10:14:47.104 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14124 192.168.123.101:0/2521618598' entity='mgr.a' 2026-03-07T10:14:47.104 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14124 192.168.123.101:0/2521618598' entity='mgr.a' 2026-03-07T10:14:47.104 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14124 192.168.123.101:0/2521618598' entity='mgr.a' cmd=[{"prefix": "config rm", "who": "osd/host:vm01", "name": "osd_memory_target"}]: dispatch 2026-03-07T10:14:47.104 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14124 192.168.123.101:0/2521618598' entity='mgr.a' 2026-03-07T10:14:47.104 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14124 192.168.123.101:0/2521618598' entity='mgr.a' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm01", "caps": []}]: dispatch 2026-03-07T10:14:47.104 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14124 192.168.123.101:0/2521618598' entity='mgr.a' cmd='[{"prefix": "auth get-or-create", "entity": "client.agent.vm01", "caps": []}]': finished 2026-03-07T10:14:47.104 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='client.? 192.168.123.101:0/2630861903' entity='client.admin' cmd='[{"prefix": "mgr module enable", "module": "dashboard"}]': finished 2026-03-07T10:14:47.104 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: mgrmap e9: a(active, since 8s) 2026-03-07T10:14:47.104 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='client.? 
192.168.123.101:0/43663662' entity='client.admin' cmd=[{"prefix": "mgr stat"}]: dispatch 2026-03-07T10:14:47.104 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: Active manager daemon a restarted 2026-03-07T10:14:47.104 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: Activating manager daemon a 2026-03-07T10:14:47.104 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: osdmap e3: 0 total, 0 up, 0 in 2026-03-07T10:14:47.104 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: mgrmap e10: a(active, starting, since 0.0197641s) 2026-03-07T10:14:47.104 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-07T10:14:47.104 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mgr metadata", "who": "a", "id": "a"}]: dispatch 2026-03-07T10:14:47.104 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-07T10:14:47.104 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-07T10:14:47.104 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-07T10:14:47.104 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: Manager daemon a is now available 2026-03-07T10:14:47.104 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:47.104 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:14:47.104 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/a/mirror_snapshot_schedule"}]: dispatch 2026-03-07T10:14:47.104 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/a/trash_purge_schedule"}]: dispatch 2026-03-07T10:14:47.104 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:47.104 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config rm", "who": "osd/host:vm01", "name": "osd_memory_target"}]: dispatch 2026-03-07T10:14:47.104 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:47.104 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm01", 
"caps": []}]: dispatch 2026-03-07T10:14:47.104 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: mgrmap e11: a(active, since 1.02246s) 2026-03-07T10:14:47.104 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: Deploying daemon agent.vm01 on vm01 2026-03-07T10:14:47.104 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: [07/Mar/2026:10:14:33] ENGINE Bus STARTING 2026-03-07T10:14:47.104 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='client.14168 -' entity='client.admin' cmd=[{"prefix": "dashboard create-self-signed-cert", "target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:14:47.104 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: [07/Mar/2026:10:14:33] ENGINE Serving on http://192.168.123.101:8765 2026-03-07T10:14:47.104 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:47.104 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:47.104 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: [07/Mar/2026:10:14:33] ENGINE Serving on https://192.168.123.101:7150 2026-03-07T10:14:47.104 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: [07/Mar/2026:10:14:33] ENGINE Bus STARTED 2026-03-07T10:14:47.104 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: [07/Mar/2026:10:14:33] ENGINE Client ('192.168.123.101', 49712) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-07T10:14:47.104 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='client.14170 -' entity='client.admin' cmd=[{"prefix": "dashboard ac-user-create", "username": "admin", "rolename": "administrator", "force_password": true, "pwd_update_required": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:14:47.104 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:47.104 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='client.? 192.168.123.101:0/2795827174' entity='client.admin' cmd=[{"prefix": "config get", "who": "mgr", "key": "mgr/dashboard/ssl_server_port"}]: dispatch 2026-03-07T10:14:47.104 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: mgrmap e12: a(active, since 2s) 2026-03-07T10:14:47.104 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='client.? 
192.168.123.101:0/985324366' entity='client.admin' 2026-03-07T10:14:47.104 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:47.104 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:47.104 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:47.104 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:14:47.104 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:47.104 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:47.104 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:47.104 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:47.104 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:47.104 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:14:47.104 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:47.104 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:47.104 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='client.? 
192.168.123.101:0/3409756251' entity='client.admin' 2026-03-07T10:14:47.104 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:14:47.104 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:47.104 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:47.104 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:47.104 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='client.14178 -' entity='client.admin' cmd=[{"prefix": "orch client-keyring set", "entity": "client.admin", "placement": "*", "mode": "0755", "target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:14:47.104 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:47.104 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:14:47.104 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:14:47.104 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:14:47.104 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: Updating vm01:/etc/ceph/ceph.conf 2026-03-07T10:14:47.104 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: Updating vm01:/var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/config/ceph.conf 2026-03-07T10:14:47.104 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:47.104 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: Updating vm01:/etc/ceph/ceph.client.admin.keyring 2026-03-07T10:14:47.105 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:47.105 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:47.105 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: Updating vm01:/var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/config/ceph.client.admin.keyring 2026-03-07T10:14:47.105 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='client.14180 -' entity='client.admin' cmd=[{"prefix": "orch host add", "hostname": "vm04", "target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:14:47.105 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:47.105 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 
192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:47.105 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:47.105 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: Deploying cephadm binary to vm04 2026-03-07T10:14:47.105 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: mgrmap e13: a(active, since 6s) 2026-03-07T10:14:47.105 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:47.105 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:14:47.105 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: Added host vm04 2026-03-07T10:14:47.105 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:47.105 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:47.105 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:47.105 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config rm", "who": "osd/host:vm04", "name": "osd_memory_target"}]: dispatch 2026-03-07T10:14:47.105 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:14:47.105 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:14:47.105 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: Updating vm04:/etc/ceph/ceph.conf 2026-03-07T10:14:47.105 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: Updating vm04:/var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/config/ceph.conf 2026-03-07T10:14:47.105 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='client.14182 -' entity='client.admin' cmd=[{"prefix": "orch host ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-07T10:14:47.105 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: Updating vm04:/etc/ceph/ceph.client.admin.keyring 2026-03-07T10:14:47.105 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: Updating vm04:/var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/config/ceph.client.admin.keyring 2026-03-07T10:14:47.105 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:47.105 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:47.105 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 
cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm04", "caps": []}]: dispatch 2026-03-07T10:14:47.105 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd='[{"prefix": "auth get-or-create", "entity": "client.agent.vm04", "caps": []}]': finished 2026-03-07T10:14:47.105 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: Deploying daemon agent.vm04 on vm04 2026-03-07T10:14:47.105 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='client.14184 -' entity='client.admin' cmd=[{"prefix": "orch host add", "hostname": "vm07", "target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:14:47.105 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:47.105 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:47.105 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:47.105 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:14:47.105 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:14:47.105 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:14:47.105 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:47.105 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:14:47.105 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:14:47.105 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:14:47.105 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:47.105 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:47.105 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:47.105 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:47.105 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:47.105 
INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: Deploying cephadm binary to vm07 2026-03-07T10:14:47.105 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:47.105 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:47.105 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:47.105 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:14:47.105 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: Added host vm07 2026-03-07T10:14:47.105 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:47.105 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:47.105 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:47.105 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config rm", "who": "osd/host:vm07", "name": "osd_memory_target"}]: dispatch 2026-03-07T10:14:47.105 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:14:47.105 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:14:47.105 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: Updating vm07:/etc/ceph/ceph.conf 2026-03-07T10:14:47.105 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:47.105 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:47.105 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get-or-create", "entity": "client.agent.vm07", "caps": []}]: dispatch 2026-03-07T10:14:47.105 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd='[{"prefix": "auth get-or-create", "entity": "client.agent.vm07", "caps": []}]': finished 2026-03-07T10:14:47.105 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='client.? 
192.168.123.101:0/3833318551' entity='client.admin' cmd=[{"prefix": "osd crush tunables", "profile": "default"}]: dispatch 2026-03-07T10:14:47.105 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: Updating vm07:/var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/config/ceph.conf 2026-03-07T10:14:47.105 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='client.14186 -' entity='client.admin' cmd=[{"prefix": "orch host ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-07T10:14:47.105 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: Updating vm07:/etc/ceph/ceph.client.admin.keyring 2026-03-07T10:14:47.105 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: Updating vm07:/var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/config/ceph.client.admin.keyring 2026-03-07T10:14:47.105 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='client.? 192.168.123.101:0/3833318551' entity='client.admin' cmd='[{"prefix": "osd crush tunables", "profile": "default"}]': finished 2026-03-07T10:14:47.105 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: osdmap e4: 0 total, 0 up, 0 in 2026-03-07T10:14:47.106 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:47.106 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:47.106 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:47.106 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: Deploying daemon agent.vm07 on vm07 2026-03-07T10:14:47.106 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:14:47.106 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:14:47.106 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:14:47.106 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:47.106 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:14:47.106 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:14:47.106 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:14:47.106 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 
2026-03-07T10:14:47.106 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:47.106 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:14:47.106 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:14:47.106 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:14:47.106 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:47.106 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-07T10:14:47.106 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:14:47.106 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:47.106 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:47.106 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:47.106 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:46 vm04 ceph-mon[49935]: mon.b@-1(synchronizing).paxosservice(auth 1..6) refresh upgraded, format 0 -> 3 2026-03-07T10:14:47.261 INFO:teuthology.orchestra.run.vm07.stderr:Inferring config /var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/config/ceph.conf 2026-03-07T10:14:52.056 INFO:teuthology.orchestra.run.vm07.stdout: 2026-03-07T10:14:52.057 INFO:teuthology.orchestra.run.vm07.stdout:{"epoch":2,"fsid":"3fd6e214-1a0e-11f1-b256-99cfc35f3328","modified":"2026-03-07T10:14:46.711454Z","created":"2026-03-07T10:13:48.241132Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"a","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:3300","nonce":0},{"type":"v1","addr":"192.168.123.101:6789","nonce":0}]},"addr":"192.168.123.101:6789/0","public_addr":"192.168.123.101:6789/0","priority":0,"weight":0,"crush_location":"{}"},{"rank":1,"name":"b","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:3300","nonce":0},{"type":"v1","addr":"192.168.123.104:6789","nonce":0}]},"addr":"192.168.123.104:6789/0","public_addr":"192.168.123.104:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0,1]} 2026-03-07T10:14:52.057 INFO:teuthology.orchestra.run.vm07.stderr:dumped monmap epoch 2 2026-03-07T10:14:52.100 
INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:51 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-07T10:14:52.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:51 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-07T10:14:52.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:51 vm04 ceph-mon[49935]: mon.a calling monitor election 2026-03-07T10:14:52.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:51 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:14:52.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:51 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-07T10:14:52.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:51 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-07T10:14:52.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:51 vm04 ceph-mon[49935]: mon.b calling monitor election 2026-03-07T10:14:52.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:51 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-07T10:14:52.101 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:51 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-07T10:14:52.101 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:51 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-07T10:14:52.101 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:51 vm04 ceph-mon[49935]: mon.a is new leader, mons a,b in quorum (ranks 0,1) 2026-03-07T10:14:52.101 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:51 vm04 ceph-mon[49935]: monmap epoch 2 2026-03-07T10:14:52.101 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:51 vm04 ceph-mon[49935]: fsid 3fd6e214-1a0e-11f1-b256-99cfc35f3328 2026-03-07T10:14:52.101 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:51 vm04 ceph-mon[49935]: last_changed 2026-03-07T10:14:46.711454+0000 2026-03-07T10:14:52.101 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:51 vm04 ceph-mon[49935]: created 2026-03-07T10:13:48.241132+0000 2026-03-07T10:14:52.101 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:51 vm04 ceph-mon[49935]: min_mon_release 19 (squid) 2026-03-07T10:14:52.101 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:51 vm04 ceph-mon[49935]: election_strategy: 1 2026-03-07T10:14:52.101 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:51 vm04 ceph-mon[49935]: 0: [v2:192.168.123.101:3300/0,v1:192.168.123.101:6789/0] mon.a 2026-03-07T10:14:52.101 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:51 vm04 ceph-mon[49935]: 1: [v2:192.168.123.104:3300/0,v1:192.168.123.104:6789/0] mon.b 2026-03-07T10:14:52.101 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:51 vm04 ceph-mon[49935]: fsmap 2026-03-07T10:14:52.101 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:51 vm04 ceph-mon[49935]: osdmap e4: 0 total, 0 up, 0 in 2026-03-07T10:14:52.101 
INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:51 vm04 ceph-mon[49935]: mgrmap e13: a(active, since 20s) 2026-03-07T10:14:52.101 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:51 vm04 ceph-mon[49935]: overall HEALTH_OK 2026-03-07T10:14:52.101 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:51 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:14:52.101 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:51 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:14:52.101 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:51 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:52.101 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:51 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:52.101 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:51 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:52.101 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:51 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:52.101 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:51 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:52.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:51 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-07T10:14:52.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:51 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-07T10:14:52.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:51 vm01 ceph-mon[49602]: mon.a calling monitor election 2026-03-07T10:14:52.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:51 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:14:52.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:51 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-07T10:14:52.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:51 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-07T10:14:52.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:51 vm01 ceph-mon[49602]: mon.b calling monitor election 2026-03-07T10:14:52.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:51 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-07T10:14:52.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:51 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-07T10:14:52.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:51 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-07T10:14:52.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 
10:14:51 vm01 ceph-mon[49602]: mon.a is new leader, mons a,b in quorum (ranks 0,1) 2026-03-07T10:14:52.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:51 vm01 ceph-mon[49602]: monmap epoch 2 2026-03-07T10:14:52.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:51 vm01 ceph-mon[49602]: fsid 3fd6e214-1a0e-11f1-b256-99cfc35f3328 2026-03-07T10:14:52.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:51 vm01 ceph-mon[49602]: last_changed 2026-03-07T10:14:46.711454+0000 2026-03-07T10:14:52.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:51 vm01 ceph-mon[49602]: created 2026-03-07T10:13:48.241132+0000 2026-03-07T10:14:52.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:51 vm01 ceph-mon[49602]: min_mon_release 19 (squid) 2026-03-07T10:14:52.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:51 vm01 ceph-mon[49602]: election_strategy: 1 2026-03-07T10:14:52.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:51 vm01 ceph-mon[49602]: 0: [v2:192.168.123.101:3300/0,v1:192.168.123.101:6789/0] mon.a 2026-03-07T10:14:52.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:51 vm01 ceph-mon[49602]: 1: [v2:192.168.123.104:3300/0,v1:192.168.123.104:6789/0] mon.b 2026-03-07T10:14:52.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:51 vm01 ceph-mon[49602]: fsmap 2026-03-07T10:14:52.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:51 vm01 ceph-mon[49602]: osdmap e4: 0 total, 0 up, 0 in 2026-03-07T10:14:52.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:51 vm01 ceph-mon[49602]: mgrmap e13: a(active, since 20s) 2026-03-07T10:14:52.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:51 vm01 ceph-mon[49602]: overall HEALTH_OK 2026-03-07T10:14:52.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:51 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:14:52.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:51 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:14:52.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:51 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:52.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:51 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:52.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:51 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:52.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:51 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:52.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:51 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:53.131 INFO:tasks.cephadm:Waiting for 3 mons in monmap... 
2026-03-07T10:14:53.131 DEBUG:teuthology.orchestra.run.vm07:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3fd6e214-1a0e-11f1-b256-99cfc35f3328 -- ceph mon dump -f json 2026-03-07T10:14:53.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:52 vm01 ceph-mon[49602]: pgmap v4: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-07T10:14:53.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:52 vm01 ceph-mon[49602]: Updating vm01:/etc/ceph/ceph.conf 2026-03-07T10:14:53.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:52 vm01 ceph-mon[49602]: Updating vm04:/etc/ceph/ceph.conf 2026-03-07T10:14:53.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:52 vm01 ceph-mon[49602]: Updating vm07:/etc/ceph/ceph.conf 2026-03-07T10:14:53.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:52 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:53.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:52 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:53.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:52 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:53.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:52 vm01 ceph-mon[49602]: Updating vm07:/var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/config/ceph.conf 2026-03-07T10:14:53.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:52 vm01 ceph-mon[49602]: Updating vm01:/var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/config/ceph.conf 2026-03-07T10:14:53.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:52 vm01 ceph-mon[49602]: Updating vm04:/var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/config/ceph.conf 2026-03-07T10:14:53.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:52 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:53.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:52 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:53.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:52 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:53.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:52 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:53.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:52 vm01 ceph-mon[49602]: from='client.? 
192.168.123.107:0/2875554572' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-07T10:14:53.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:52 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:53.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:52 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:53.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:52 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:53.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:52 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-07T10:14:53.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:52 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:14:53.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:52 vm01 ceph-mon[49602]: Deploying daemon mon.c on vm07 2026-03-07T10:14:53.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:52 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-07T10:14:53.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:52 vm04 ceph-mon[49935]: pgmap v4: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-07T10:14:53.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:52 vm04 ceph-mon[49935]: Updating vm01:/etc/ceph/ceph.conf 2026-03-07T10:14:53.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:52 vm04 ceph-mon[49935]: Updating vm04:/etc/ceph/ceph.conf 2026-03-07T10:14:53.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:52 vm04 ceph-mon[49935]: Updating vm07:/etc/ceph/ceph.conf 2026-03-07T10:14:53.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:52 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:53.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:52 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:53.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:52 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:53.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:52 vm04 ceph-mon[49935]: Updating vm07:/var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/config/ceph.conf 2026-03-07T10:14:53.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:52 vm04 ceph-mon[49935]: Updating vm01:/var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/config/ceph.conf 2026-03-07T10:14:53.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:52 vm04 ceph-mon[49935]: Updating vm04:/var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/config/ceph.conf 2026-03-07T10:14:53.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:52 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:53.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:52 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:53.351 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:52 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:53.351 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:52 vm04 
ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:53.351 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:52 vm04 ceph-mon[49935]: from='client.? 192.168.123.107:0/2875554572' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-07T10:14:53.351 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:52 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:53.351 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:52 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:53.351 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:52 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:53.351 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:52 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-07T10:14:53.351 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:52 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:14:53.351 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:52 vm04 ceph-mon[49935]: Deploying daemon mon.c on vm07 2026-03-07T10:14:53.351 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:52 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-07T10:14:53.497 INFO:teuthology.orchestra.run.vm07.stderr:Inferring config /var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/mon.c/config 2026-03-07T10:14:53.974 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:14:53 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: 2026-03-07T10:14:53.710+0000 7fe879f25640 -1 mgr.server handle_report got status from non-daemon mon.b 2026-03-07T10:14:58.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:58 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-07T10:14:58.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:58 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-07T10:14:58.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:58 vm01 ceph-mon[49602]: mon.a calling monitor election 2026-03-07T10:14:58.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:58 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-07T10:14:58.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:58 vm01 ceph-mon[49602]: mon.b calling monitor election 2026-03-07T10:14:58.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:58 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:14:58.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:58 vm01 ceph-mon[49602]: pgmap v5: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-07T10:14:58.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:58 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-07T10:14:58.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:58 
vm01 ceph-mon[49602]: mon.c calling monitor election 2026-03-07T10:14:58.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:58 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-07T10:14:58.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:58 vm01 ceph-mon[49602]: pgmap v6: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-07T10:14:58.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:58 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-07T10:14:58.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:58 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-07T10:14:58.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:58 vm01 ceph-mon[49602]: pgmap v7: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-07T10:14:58.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:58 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-07T10:14:58.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:58 vm01 ceph-mon[49602]: mon.a is new leader, mons a,b,c in quorum (ranks 0,1,2) 2026-03-07T10:14:58.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:58 vm01 ceph-mon[49602]: monmap epoch 3 2026-03-07T10:14:58.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:58 vm01 ceph-mon[49602]: fsid 3fd6e214-1a0e-11f1-b256-99cfc35f3328 2026-03-07T10:14:58.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:58 vm01 ceph-mon[49602]: last_changed 2026-03-07T10:14:53.567960+0000 2026-03-07T10:14:58.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:58 vm01 ceph-mon[49602]: created 2026-03-07T10:13:48.241132+0000 2026-03-07T10:14:58.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:58 vm01 ceph-mon[49602]: min_mon_release 19 (squid) 2026-03-07T10:14:58.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:58 vm01 ceph-mon[49602]: election_strategy: 1 2026-03-07T10:14:58.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:58 vm01 ceph-mon[49602]: 0: [v2:192.168.123.101:3300/0,v1:192.168.123.101:6789/0] mon.a 2026-03-07T10:14:58.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:58 vm01 ceph-mon[49602]: 1: [v2:192.168.123.104:3300/0,v1:192.168.123.104:6789/0] mon.b 2026-03-07T10:14:58.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:58 vm01 ceph-mon[49602]: 2: [v2:192.168.123.107:3300/0,v1:192.168.123.107:6789/0] mon.c 2026-03-07T10:14:58.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:58 vm01 ceph-mon[49602]: fsmap 2026-03-07T10:14:58.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:58 vm01 ceph-mon[49602]: osdmap e4: 0 total, 0 up, 0 in 2026-03-07T10:14:58.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:58 vm01 ceph-mon[49602]: mgrmap e13: a(active, since 26s) 2026-03-07T10:14:58.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:58 vm01 ceph-mon[49602]: overall HEALTH_OK 2026-03-07T10:14:58.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:58 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:58.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:58 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 
2026-03-07T10:14:58.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:58 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:58.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:58 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:14:58.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:58 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:58.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:58 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:58.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:58 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:58.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:58 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:58.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:58 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:59.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:58 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-07T10:14:59.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:58 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-07T10:14:59.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:58 vm04 ceph-mon[49935]: mon.a calling monitor election 2026-03-07T10:14:59.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:58 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-07T10:14:59.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:58 vm04 ceph-mon[49935]: mon.b calling monitor election 2026-03-07T10:14:59.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:58 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:14:59.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:58 vm04 ceph-mon[49935]: pgmap v5: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-07T10:14:59.101 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:58 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-07T10:14:59.101 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:58 vm04 ceph-mon[49935]: mon.c calling monitor election 2026-03-07T10:14:59.101 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:58 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-07T10:14:59.101 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:58 vm04 ceph-mon[49935]: pgmap v6: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-07T10:14:59.101 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:58 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-07T10:14:59.101 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:58 vm04 ceph-mon[49935]: from='mgr.14156 
192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-07T10:14:59.101 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:58 vm04 ceph-mon[49935]: pgmap v7: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-07T10:14:59.101 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:58 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-07T10:14:59.101 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:58 vm04 ceph-mon[49935]: mon.a is new leader, mons a,b,c in quorum (ranks 0,1,2) 2026-03-07T10:14:59.101 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:58 vm04 ceph-mon[49935]: monmap epoch 3 2026-03-07T10:14:59.101 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:58 vm04 ceph-mon[49935]: fsid 3fd6e214-1a0e-11f1-b256-99cfc35f3328 2026-03-07T10:14:59.101 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:58 vm04 ceph-mon[49935]: last_changed 2026-03-07T10:14:53.567960+0000 2026-03-07T10:14:59.101 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:58 vm04 ceph-mon[49935]: created 2026-03-07T10:13:48.241132+0000 2026-03-07T10:14:59.101 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:58 vm04 ceph-mon[49935]: min_mon_release 19 (squid) 2026-03-07T10:14:59.101 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:58 vm04 ceph-mon[49935]: election_strategy: 1 2026-03-07T10:14:59.101 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:58 vm04 ceph-mon[49935]: 0: [v2:192.168.123.101:3300/0,v1:192.168.123.101:6789/0] mon.a 2026-03-07T10:14:59.101 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:58 vm04 ceph-mon[49935]: 1: [v2:192.168.123.104:3300/0,v1:192.168.123.104:6789/0] mon.b 2026-03-07T10:14:59.101 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:58 vm04 ceph-mon[49935]: 2: [v2:192.168.123.107:3300/0,v1:192.168.123.107:6789/0] mon.c 2026-03-07T10:14:59.101 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:58 vm04 ceph-mon[49935]: fsmap 2026-03-07T10:14:59.101 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:58 vm04 ceph-mon[49935]: osdmap e4: 0 total, 0 up, 0 in 2026-03-07T10:14:59.101 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:58 vm04 ceph-mon[49935]: mgrmap e13: a(active, since 26s) 2026-03-07T10:14:59.101 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:58 vm04 ceph-mon[49935]: overall HEALTH_OK 2026-03-07T10:14:59.101 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:58 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:59.101 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:58 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:14:59.101 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:58 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:59.101 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:58 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:14:59.101 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:58 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:59.101 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:58 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:59.101 
INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:58 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:59.101 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:58 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:59.101 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:58 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:59.744 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:59 vm04 ceph-mon[49935]: Updating vm01:/etc/ceph/ceph.conf 2026-03-07T10:14:59.744 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:59 vm04 ceph-mon[49935]: Updating vm04:/etc/ceph/ceph.conf 2026-03-07T10:14:59.744 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:59 vm04 ceph-mon[49935]: Updating vm07:/etc/ceph/ceph.conf 2026-03-07T10:14:59.744 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:59 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:59.744 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:59 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:59.744 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:59 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:59.744 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:59 vm04 ceph-mon[49935]: Updating vm04:/var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/config/ceph.conf 2026-03-07T10:14:59.744 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:59 vm04 ceph-mon[49935]: Updating vm01:/var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/config/ceph.conf 2026-03-07T10:14:59.744 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:59 vm04 ceph-mon[49935]: Updating vm07:/var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/config/ceph.conf 2026-03-07T10:14:59.744 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:59 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:59.744 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:59 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:59.744 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:59 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:59.744 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:59 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:59.744 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:59 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:59.744 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:59 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:14:59.928 INFO:teuthology.orchestra.run.vm07.stdout: 2026-03-07T10:14:59.928 
INFO:teuthology.orchestra.run.vm07.stdout:{"epoch":3,"fsid":"3fd6e214-1a0e-11f1-b256-99cfc35f3328","modified":"2026-03-07T10:14:53.567960Z","created":"2026-03-07T10:13:48.241132Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"a","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:3300","nonce":0},{"type":"v1","addr":"192.168.123.101:6789","nonce":0}]},"addr":"192.168.123.101:6789/0","public_addr":"192.168.123.101:6789/0","priority":0,"weight":0,"crush_location":"{}"},{"rank":1,"name":"b","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:3300","nonce":0},{"type":"v1","addr":"192.168.123.104:6789","nonce":0}]},"addr":"192.168.123.104:6789/0","public_addr":"192.168.123.104:6789/0","priority":0,"weight":0,"crush_location":"{}"},{"rank":2,"name":"c","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:3300","nonce":0},{"type":"v1","addr":"192.168.123.107:6789","nonce":0}]},"addr":"192.168.123.107:6789/0","public_addr":"192.168.123.107:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0,1,2]} 2026-03-07T10:14:59.928 INFO:teuthology.orchestra.run.vm07.stderr:dumped monmap epoch 3 2026-03-07T10:14:59.997 INFO:tasks.cephadm:Generating final ceph.conf file... 2026-03-07T10:14:59.997 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3fd6e214-1a0e-11f1-b256-99cfc35f3328 -- ceph config generate-minimal-conf 2026-03-07T10:15:00.022 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:59 vm01 ceph-mon[49602]: Updating vm01:/etc/ceph/ceph.conf 2026-03-07T10:15:00.022 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:59 vm01 ceph-mon[49602]: Updating vm04:/etc/ceph/ceph.conf 2026-03-07T10:15:00.022 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:59 vm01 ceph-mon[49602]: Updating vm07:/etc/ceph/ceph.conf 2026-03-07T10:15:00.022 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:59 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:00.022 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:59 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:00.022 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:59 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:00.022 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:59 vm01 ceph-mon[49602]: Updating vm04:/var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/config/ceph.conf 2026-03-07T10:15:00.022 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:59 vm01 ceph-mon[49602]: Updating vm01:/var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/config/ceph.conf 2026-03-07T10:15:00.022 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:59 vm01 ceph-mon[49602]: Updating vm07:/var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/config/ceph.conf 2026-03-07T10:15:00.022 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:59 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:00.022 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:59 vm01 ceph-mon[49602]: 
from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:00.022 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:59 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:00.022 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:59 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:00.022 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:59 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:00.022 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:59 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:00.022 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:59 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:00.022 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:59 vm01 ceph-mon[49602]: Reconfiguring mon.a (unknown last config time)... 2026-03-07T10:15:00.022 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:59 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-07T10:15:00.022 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:59 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-07T10:15:00.022 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:59 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:00.022 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:59 vm01 ceph-mon[49602]: Reconfiguring daemon mon.a on vm01 2026-03-07T10:15:00.022 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:59 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:00.022 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:59 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:00.022 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:59 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-07T10:15:00.023 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:59 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-07T10:15:00.023 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:59 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:00.023 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:59 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-07T10:15:00.023 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:59 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:00.023 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:59 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:00.023 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:59 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 
cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-07T10:15:00.023 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:59 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-07T10:15:00.023 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:14:59 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:00.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:59 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:00.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:59 vm04 ceph-mon[49935]: Reconfiguring mon.a (unknown last config time)... 2026-03-07T10:15:00.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:59 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-07T10:15:00.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:59 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-07T10:15:00.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:59 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:00.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:59 vm04 ceph-mon[49935]: Reconfiguring daemon mon.a on vm01 2026-03-07T10:15:00.101 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:59 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:00.101 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:59 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:00.101 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:59 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-07T10:15:00.101 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:59 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-07T10:15:00.101 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:59 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:00.101 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:59 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-07T10:15:00.101 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:59 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:00.101 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:59 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:00.101 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:59 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-07T10:15:00.101 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:59 vm04 ceph-mon[49935]: from='mgr.14156 
192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-07T10:15:00.101 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:14:59 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:00.179 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/mon.a/config 2026-03-07T10:15:00.507 INFO:teuthology.orchestra.run.vm01.stdout:# minimal ceph.conf for 3fd6e214-1a0e-11f1-b256-99cfc35f3328 2026-03-07T10:15:00.507 INFO:teuthology.orchestra.run.vm01.stdout:[global] 2026-03-07T10:15:00.507 INFO:teuthology.orchestra.run.vm01.stdout: fsid = 3fd6e214-1a0e-11f1-b256-99cfc35f3328 2026-03-07T10:15:00.507 INFO:teuthology.orchestra.run.vm01.stdout: mon_host = [v2:192.168.123.101:3300/0,v1:192.168.123.101:6789/0] [v2:192.168.123.104:3300/0,v1:192.168.123.104:6789/0] [v2:192.168.123.107:3300/0,v1:192.168.123.107:6789/0] 2026-03-07T10:15:00.590 INFO:tasks.cephadm:Distributing (final) config and client.admin keyring... 2026-03-07T10:15:00.590 DEBUG:teuthology.orchestra.run.vm01:> set -ex 2026-03-07T10:15:00.590 DEBUG:teuthology.orchestra.run.vm01:> sudo dd of=/etc/ceph/ceph.conf 2026-03-07T10:15:00.659 DEBUG:teuthology.orchestra.run.vm01:> set -ex 2026-03-07T10:15:00.660 DEBUG:teuthology.orchestra.run.vm01:> sudo dd of=/etc/ceph/ceph.client.admin.keyring 2026-03-07T10:15:00.728 DEBUG:teuthology.orchestra.run.vm04:> set -ex 2026-03-07T10:15:00.728 DEBUG:teuthology.orchestra.run.vm04:> sudo dd of=/etc/ceph/ceph.conf 2026-03-07T10:15:00.754 DEBUG:teuthology.orchestra.run.vm04:> set -ex 2026-03-07T10:15:00.755 DEBUG:teuthology.orchestra.run.vm04:> sudo dd of=/etc/ceph/ceph.client.admin.keyring 2026-03-07T10:15:00.819 DEBUG:teuthology.orchestra.run.vm07:> set -ex 2026-03-07T10:15:00.819 DEBUG:teuthology.orchestra.run.vm07:> sudo dd of=/etc/ceph/ceph.conf 2026-03-07T10:15:00.846 DEBUG:teuthology.orchestra.run.vm07:> set -ex 2026-03-07T10:15:00.847 DEBUG:teuthology.orchestra.run.vm07:> sudo dd of=/etc/ceph/ceph.client.admin.keyring 2026-03-07T10:15:00.904 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:15:00 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: 2026-03-07T10:15:00.568+0000 7fe879f25640 -1 mgr.server handle_report got status from non-daemon mon.c 2026-03-07T10:15:00.906 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:00 vm01 ceph-mon[49602]: Reconfiguring mon.b (monmap changed)... 2026-03-07T10:15:00.906 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:00 vm01 ceph-mon[49602]: Reconfiguring daemon mon.b on vm04 2026-03-07T10:15:00.906 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:00 vm01 ceph-mon[49602]: Reconfiguring mon.c (monmap changed)... 
2026-03-07T10:15:00.906 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:00 vm01 ceph-mon[49602]: Reconfiguring daemon mon.c on vm07 2026-03-07T10:15:00.906 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:00 vm01 ceph-mon[49602]: pgmap v8: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-07T10:15:00.906 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:00 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:00.906 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:00 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:00.906 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:00 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:15:00.906 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:00 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:00.906 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:00 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:15:00.906 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:00 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:00.906 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:00 vm01 ceph-mon[49602]: from='client.? 192.168.123.107:0/2549510839' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-07T10:15:00.906 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:00 vm01 ceph-mon[49602]: from='client.? 192.168.123.101:0/4189815186' entity='client.admin' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:00.913 INFO:tasks.cephadm:Adding mgr.a on vm01 2026-03-07T10:15:00.913 INFO:tasks.cephadm:Adding mgr.b on vm04 2026-03-07T10:15:00.913 DEBUG:teuthology.orchestra.run.vm07:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3fd6e214-1a0e-11f1-b256-99cfc35f3328 -- ceph orch apply mgr '2;vm01=a;vm04=b' 2026-03-07T10:15:01.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:00 vm04 ceph-mon[49935]: Reconfiguring mon.b (monmap changed)... 2026-03-07T10:15:01.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:00 vm04 ceph-mon[49935]: Reconfiguring daemon mon.b on vm04 2026-03-07T10:15:01.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:00 vm04 ceph-mon[49935]: Reconfiguring mon.c (monmap changed)... 
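The ceph orch apply mgr '2;vm01=a;vm04=b' call above is the CLI shorthand for a manager service spec: count 2 with explicit host=daemon-id placements. Roughly the same placement can be applied from a YAML spec instead; a sketch assuming the same hosts, with the file name purely illustrative:

cat > mgr.yaml <<'EOF'
service_type: mgr
placement:
  count: 2
  hosts:
    - vm01=a
    - vm04=b
EOF
ceph orch apply -i mgr.yaml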
2026-03-07T10:15:01.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:00 vm04 ceph-mon[49935]: Reconfiguring daemon mon.c on vm07 2026-03-07T10:15:01.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:00 vm04 ceph-mon[49935]: pgmap v8: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-07T10:15:01.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:00 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:01.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:00 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:01.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:00 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:15:01.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:00 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:01.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:00 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:15:01.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:00 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:01.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:00 vm04 ceph-mon[49935]: from='client.? 192.168.123.107:0/2549510839' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-07T10:15:01.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:00 vm04 ceph-mon[49935]: from='client.? 192.168.123.101:0/4189815186' entity='client.admin' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:01.119 INFO:teuthology.orchestra.run.vm07.stderr:Inferring config /var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/mon.c/config 2026-03-07T10:15:01.454 INFO:teuthology.orchestra.run.vm07.stdout:Scheduled mgr update... 2026-03-07T10:15:01.532 DEBUG:teuthology.orchestra.run.vm04:mgr.b> sudo journalctl -f -n 0 -u ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328@mgr.b.service 2026-03-07T10:15:01.534 INFO:tasks.cephadm:Deploying OSDs... 2026-03-07T10:15:01.534 DEBUG:teuthology.orchestra.run.vm01:> set -ex 2026-03-07T10:15:01.534 DEBUG:teuthology.orchestra.run.vm01:> dd if=/scratch_devs of=/dev/stdout 2026-03-07T10:15:01.552 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-07T10:15:01.552 DEBUG:teuthology.orchestra.run.vm01:> ls /dev/[sv]d? 
2026-03-07T10:15:01.608 INFO:teuthology.orchestra.run.vm01.stdout:/dev/vda 2026-03-07T10:15:01.608 INFO:teuthology.orchestra.run.vm01.stdout:/dev/vdb 2026-03-07T10:15:01.608 INFO:teuthology.orchestra.run.vm01.stdout:/dev/vdc 2026-03-07T10:15:01.608 INFO:teuthology.orchestra.run.vm01.stdout:/dev/vdd 2026-03-07T10:15:01.608 INFO:teuthology.orchestra.run.vm01.stdout:/dev/vde 2026-03-07T10:15:01.608 WARNING:teuthology.misc:Removing root device: /dev/vda from device list 2026-03-07T10:15:01.608 DEBUG:teuthology.misc:devs=['/dev/vdb', '/dev/vdc', '/dev/vdd', '/dev/vde'] 2026-03-07T10:15:01.608 DEBUG:teuthology.orchestra.run.vm01:> stat /dev/vdb 2026-03-07T10:15:01.668 INFO:teuthology.orchestra.run.vm01.stdout: File: /dev/vdb 2026-03-07T10:15:01.668 INFO:teuthology.orchestra.run.vm01.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-07T10:15:01.668 INFO:teuthology.orchestra.run.vm01.stdout:Device: 6h/6d Inode: 221 Links: 1 Device type: fc,10 2026-03-07T10:15:01.668 INFO:teuthology.orchestra.run.vm01.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-07T10:15:01.668 INFO:teuthology.orchestra.run.vm01.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-07T10:15:01.668 INFO:teuthology.orchestra.run.vm01.stdout:Access: 2026-03-07 10:14:57.340968371 +0000 2026-03-07T10:15:01.668 INFO:teuthology.orchestra.run.vm01.stdout:Modify: 2026-03-07 10:13:11.776571624 +0000 2026-03-07T10:15:01.668 INFO:teuthology.orchestra.run.vm01.stdout:Change: 2026-03-07 10:13:11.776571624 +0000 2026-03-07T10:15:01.668 INFO:teuthology.orchestra.run.vm01.stdout: Birth: 2026-03-07 10:04:47.224000000 +0000 2026-03-07T10:15:01.669 DEBUG:teuthology.orchestra.run.vm01:> sudo dd if=/dev/vdb of=/dev/null count=1 2026-03-07T10:15:01.736 INFO:teuthology.orchestra.run.vm01.stderr:1+0 records in 2026-03-07T10:15:01.736 INFO:teuthology.orchestra.run.vm01.stderr:1+0 records out 2026-03-07T10:15:01.736 INFO:teuthology.orchestra.run.vm01.stderr:512 bytes copied, 0.000126166 s, 4.1 MB/s 2026-03-07T10:15:01.738 DEBUG:teuthology.orchestra.run.vm01:> ! 
mount | grep -v devtmpfs | grep -q /dev/vdb 2026-03-07T10:15:01.797 DEBUG:teuthology.orchestra.run.vm01:> stat /dev/vdc 2026-03-07T10:15:01.858 INFO:teuthology.orchestra.run.vm01.stdout: File: /dev/vdc 2026-03-07T10:15:01.858 INFO:teuthology.orchestra.run.vm01.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-07T10:15:01.858 INFO:teuthology.orchestra.run.vm01.stdout:Device: 6h/6d Inode: 224 Links: 1 Device type: fc,20 2026-03-07T10:15:01.858 INFO:teuthology.orchestra.run.vm01.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-07T10:15:01.858 INFO:teuthology.orchestra.run.vm01.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-07T10:15:01.858 INFO:teuthology.orchestra.run.vm01.stdout:Access: 2026-03-07 10:14:57.345968375 +0000 2026-03-07T10:15:01.858 INFO:teuthology.orchestra.run.vm01.stdout:Modify: 2026-03-07 10:13:11.781571627 +0000 2026-03-07T10:15:01.858 INFO:teuthology.orchestra.run.vm01.stdout:Change: 2026-03-07 10:13:11.781571627 +0000 2026-03-07T10:15:01.858 INFO:teuthology.orchestra.run.vm01.stdout: Birth: 2026-03-07 10:04:47.228000000 +0000 2026-03-07T10:15:01.858 DEBUG:teuthology.orchestra.run.vm01:> sudo dd if=/dev/vdc of=/dev/null count=1 2026-03-07T10:15:01.924 INFO:teuthology.orchestra.run.vm01.stderr:1+0 records in 2026-03-07T10:15:01.924 INFO:teuthology.orchestra.run.vm01.stderr:1+0 records out 2026-03-07T10:15:01.924 INFO:teuthology.orchestra.run.vm01.stderr:512 bytes copied, 0.000184545 s, 2.8 MB/s 2026-03-07T10:15:01.925 DEBUG:teuthology.orchestra.run.vm01:> ! mount | grep -v devtmpfs | grep -q /dev/vdc 2026-03-07T10:15:01.982 DEBUG:teuthology.orchestra.run.vm01:> stat /dev/vdd 2026-03-07T10:15:02.038 INFO:teuthology.orchestra.run.vm01.stdout: File: /dev/vdd 2026-03-07T10:15:02.038 INFO:teuthology.orchestra.run.vm01.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-07T10:15:02.038 INFO:teuthology.orchestra.run.vm01.stdout:Device: 6h/6d Inode: 256 Links: 1 Device type: fc,30 2026-03-07T10:15:02.038 INFO:teuthology.orchestra.run.vm01.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-07T10:15:02.038 INFO:teuthology.orchestra.run.vm01.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-07T10:15:02.038 INFO:teuthology.orchestra.run.vm01.stdout:Access: 2026-03-07 10:14:57.349968377 +0000 2026-03-07T10:15:02.038 INFO:teuthology.orchestra.run.vm01.stdout:Modify: 2026-03-07 10:13:11.765571617 +0000 2026-03-07T10:15:02.038 INFO:teuthology.orchestra.run.vm01.stdout:Change: 2026-03-07 10:13:11.765571617 +0000 2026-03-07T10:15:02.038 INFO:teuthology.orchestra.run.vm01.stdout: Birth: 2026-03-07 10:04:47.249000000 +0000 2026-03-07T10:15:02.039 DEBUG:teuthology.orchestra.run.vm01:> sudo dd if=/dev/vdd of=/dev/null count=1 2026-03-07T10:15:02.102 INFO:teuthology.orchestra.run.vm01.stderr:1+0 records in 2026-03-07T10:15:02.102 INFO:teuthology.orchestra.run.vm01.stderr:1+0 records out 2026-03-07T10:15:02.102 INFO:teuthology.orchestra.run.vm01.stderr:512 bytes copied, 0.000182993 s, 2.8 MB/s 2026-03-07T10:15:02.103 DEBUG:teuthology.orchestra.run.vm01:> ! mount | grep -v devtmpfs | grep -q /dev/vdd 2026-03-07T10:15:02.158 INFO:journalctl@ceph.mgr.b.vm04.stdout:Mar 07 10:15:02 vm04 systemd[1]: Starting Ceph mgr.b for 3fd6e214-1a0e-11f1-b256-99cfc35f3328... 
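The stat / dd / mount sequences above (and the equivalent passes that follow for vm04 and vm07) are teuthology's per-device sanity check before the scratch disks are handed to cephadm for OSDs: list /dev/[sv]d?, drop the root device, then confirm each remaining block device exists, is readable, and is not mounted. A compressed sketch of that filter, assuming the same device naming; the loop itself is illustrative:

for dev in /dev/[sv]d?; do
  [ "$dev" = /dev/vda ] && continue                              # skip the root device
  stat "$dev" >/dev/null 2>&1 || continue                        # device node present
  sudo dd if="$dev" of=/dev/null count=1 2>/dev/null || continue # readable
  mount | grep -v devtmpfs | grep -q "$dev" && continue          # must not be mounted
  echo "usable scratch device: $dev"
done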
2026-03-07T10:15:02.160 DEBUG:teuthology.orchestra.run.vm01:> stat /dev/vde 2026-03-07T10:15:02.216 INFO:teuthology.orchestra.run.vm01.stdout: File: /dev/vde 2026-03-07T10:15:02.216 INFO:teuthology.orchestra.run.vm01.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-07T10:15:02.216 INFO:teuthology.orchestra.run.vm01.stdout:Device: 6h/6d Inode: 257 Links: 1 Device type: fc,40 2026-03-07T10:15:02.216 INFO:teuthology.orchestra.run.vm01.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-07T10:15:02.216 INFO:teuthology.orchestra.run.vm01.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-07T10:15:02.216 INFO:teuthology.orchestra.run.vm01.stdout:Access: 2026-03-07 10:14:57.354968380 +0000 2026-03-07T10:15:02.216 INFO:teuthology.orchestra.run.vm01.stdout:Modify: 2026-03-07 10:13:11.779571626 +0000 2026-03-07T10:15:02.216 INFO:teuthology.orchestra.run.vm01.stdout:Change: 2026-03-07 10:13:11.779571626 +0000 2026-03-07T10:15:02.216 INFO:teuthology.orchestra.run.vm01.stdout: Birth: 2026-03-07 10:04:47.253000000 +0000 2026-03-07T10:15:02.217 DEBUG:teuthology.orchestra.run.vm01:> sudo dd if=/dev/vde of=/dev/null count=1 2026-03-07T10:15:02.286 INFO:teuthology.orchestra.run.vm01.stderr:1+0 records in 2026-03-07T10:15:02.286 INFO:teuthology.orchestra.run.vm01.stderr:1+0 records out 2026-03-07T10:15:02.286 INFO:teuthology.orchestra.run.vm01.stderr:512 bytes copied, 0.00012788 s, 4.0 MB/s 2026-03-07T10:15:02.287 DEBUG:teuthology.orchestra.run.vm01:> ! mount | grep -v devtmpfs | grep -q /dev/vde 2026-03-07T10:15:02.356 DEBUG:teuthology.orchestra.run.vm04:> set -ex 2026-03-07T10:15:02.357 DEBUG:teuthology.orchestra.run.vm04:> dd if=/scratch_devs of=/dev/stdout 2026-03-07T10:15:02.381 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-07T10:15:02.381 DEBUG:teuthology.orchestra.run.vm04:> ls /dev/[sv]d? 
2026-03-07T10:15:02.441 INFO:teuthology.orchestra.run.vm04.stdout:/dev/vda 2026-03-07T10:15:02.441 INFO:teuthology.orchestra.run.vm04.stdout:/dev/vdb 2026-03-07T10:15:02.441 INFO:teuthology.orchestra.run.vm04.stdout:/dev/vdc 2026-03-07T10:15:02.441 INFO:teuthology.orchestra.run.vm04.stdout:/dev/vdd 2026-03-07T10:15:02.441 INFO:teuthology.orchestra.run.vm04.stdout:/dev/vde 2026-03-07T10:15:02.441 WARNING:teuthology.misc:Removing root device: /dev/vda from device list 2026-03-07T10:15:02.441 DEBUG:teuthology.misc:devs=['/dev/vdb', '/dev/vdc', '/dev/vdd', '/dev/vde'] 2026-03-07T10:15:02.441 DEBUG:teuthology.orchestra.run.vm04:> stat /dev/vdb 2026-03-07T10:15:02.450 INFO:journalctl@ceph.mgr.b.vm04.stdout:Mar 07 10:15:02 vm04 podman[51390]: 2026-03-07 10:15:02.158631753 +0000 UTC m=+0.018668183 container create 8062c9e99e6c88e70eddc182da5c75a1c37b4f83ae502a02bbb86f6a3ea384b9 (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-b, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git) 2026-03-07T10:15:02.450 INFO:journalctl@ceph.mgr.b.vm04.stdout:Mar 07 10:15:02 vm04 podman[51390]: 2026-03-07 10:15:02.187476829 +0000 UTC m=+0.047513259 container init 8062c9e99e6c88e70eddc182da5c75a1c37b4f83ae502a02bbb86f6a3ea384b9 (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-b, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8) 2026-03-07T10:15:02.450 INFO:journalctl@ceph.mgr.b.vm04.stdout:Mar 07 10:15:02 vm04 podman[51390]: 2026-03-07 10:15:02.191657041 +0000 UTC m=+0.051693471 container start 8062c9e99e6c88e70eddc182da5c75a1c37b4f83ae502a02bbb86f6a3ea384b9 (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-b, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a) 2026-03-07T10:15:02.450 INFO:journalctl@ceph.mgr.b.vm04.stdout:Mar 07 10:15:02 vm04 bash[51390]: 8062c9e99e6c88e70eddc182da5c75a1c37b4f83ae502a02bbb86f6a3ea384b9 2026-03-07T10:15:02.450 INFO:journalctl@ceph.mgr.b.vm04.stdout:Mar 07 10:15:02 vm04 podman[51390]: 2026-03-07 10:15:02.148656067 +0000 UTC m=+0.008692497 image pull 8bccc98d839aa18345ec1336292d0452ca331737e49f12524f635044dcabcfe1 harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0 2026-03-07T10:15:02.450 INFO:journalctl@ceph.mgr.b.vm04.stdout:Mar 07 10:15:02 vm04 systemd[1]: Started Ceph mgr.b for 3fd6e214-1a0e-11f1-b256-99cfc35f3328. 
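The podman create/init/start records and the systemd "Started Ceph mgr.b" message above show how cephadm runs each daemon: a container named ceph-<fsid>-<daemon> wrapped in a systemd unit named ceph-<fsid>@<daemon>.service, both visible in the journal lines. A few commands for inspecting such a daemon by hand, assuming the fsid from this run; these are illustrative and not part of the test itself:

fsid=3fd6e214-1a0e-11f1-b256-99cfc35f3328
sudo systemctl status "ceph-${fsid}@mgr.b.service"
sudo journalctl -u "ceph-${fsid}@mgr.b.service" -n 50
sudo podman ps --filter "name=ceph-${fsid}-mgr-b"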
2026-03-07T10:15:02.450 INFO:journalctl@ceph.mgr.b.vm04.stdout:Mar 07 10:15:02 vm04 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-b[51400]: 2026-03-07T10:15:02.448+0000 7fc663fe3100 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member 2026-03-07T10:15:02.479 INFO:teuthology.orchestra.run.vm04.stdout: File: /dev/vdb 2026-03-07T10:15:02.479 INFO:teuthology.orchestra.run.vm04.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-07T10:15:02.479 INFO:teuthology.orchestra.run.vm04.stdout:Device: 6h/6d Inode: 221 Links: 1 Device type: fc,10 2026-03-07T10:15:02.479 INFO:teuthology.orchestra.run.vm04.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-07T10:15:02.479 INFO:teuthology.orchestra.run.vm04.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-07T10:15:02.479 INFO:teuthology.orchestra.run.vm04.stdout:Access: 2026-03-07 10:14:48.491956672 +0000 2026-03-07T10:15:02.479 INFO:teuthology.orchestra.run.vm04.stdout:Modify: 2026-03-07 10:13:12.302758786 +0000 2026-03-07T10:15:02.479 INFO:teuthology.orchestra.run.vm04.stdout:Change: 2026-03-07 10:13:12.302758786 +0000 2026-03-07T10:15:02.479 INFO:teuthology.orchestra.run.vm04.stdout: Birth: 2026-03-07 10:05:44.247000000 +0000 2026-03-07T10:15:02.479 DEBUG:teuthology.orchestra.run.vm04:> sudo dd if=/dev/vdb of=/dev/null count=1 2026-03-07T10:15:02.546 INFO:teuthology.orchestra.run.vm04.stderr:1+0 records in 2026-03-07T10:15:02.547 INFO:teuthology.orchestra.run.vm04.stderr:1+0 records out 2026-03-07T10:15:02.547 INFO:teuthology.orchestra.run.vm04.stderr:512 bytes copied, 0.000102191 s, 5.0 MB/s 2026-03-07T10:15:02.548 DEBUG:teuthology.orchestra.run.vm04:> ! mount | grep -v devtmpfs | grep -q /dev/vdb 2026-03-07T10:15:02.610 DEBUG:teuthology.orchestra.run.vm04:> stat /dev/vdc 2026-03-07T10:15:02.671 INFO:teuthology.orchestra.run.vm04.stdout: File: /dev/vdc 2026-03-07T10:15:02.671 INFO:teuthology.orchestra.run.vm04.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-07T10:15:02.671 INFO:teuthology.orchestra.run.vm04.stdout:Device: 6h/6d Inode: 222 Links: 1 Device type: fc,20 2026-03-07T10:15:02.671 INFO:teuthology.orchestra.run.vm04.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-07T10:15:02.671 INFO:teuthology.orchestra.run.vm04.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-07T10:15:02.671 INFO:teuthology.orchestra.run.vm04.stdout:Access: 2026-03-07 10:14:48.495956674 +0000 2026-03-07T10:15:02.671 INFO:teuthology.orchestra.run.vm04.stdout:Modify: 2026-03-07 10:13:12.292758779 +0000 2026-03-07T10:15:02.671 INFO:teuthology.orchestra.run.vm04.stdout:Change: 2026-03-07 10:13:12.292758779 +0000 2026-03-07T10:15:02.671 INFO:teuthology.orchestra.run.vm04.stdout: Birth: 2026-03-07 10:05:44.251000000 +0000 2026-03-07T10:15:02.671 DEBUG:teuthology.orchestra.run.vm04:> sudo dd if=/dev/vdc of=/dev/null count=1 2026-03-07T10:15:02.724 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:02 vm01 ceph-mon[49602]: from='client.14214 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mgr", "placement": "2;vm01=a;vm04=b", "target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:15:02.724 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:02 vm01 ceph-mon[49602]: Saving service mgr spec with placement vm01=a;vm04=b;count:2 2026-03-07T10:15:02.724 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:02 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:02.724 
INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:02 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:15:02.724 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:02 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:02.724 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:02 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:15:02.724 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:02 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:02.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:02 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.b", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-07T10:15:02.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:02 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd='[{"prefix": "auth get-or-create", "entity": "mgr.b", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]': finished 2026-03-07T10:15:02.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:02 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-07T10:15:02.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:02 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:02.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:02 vm01 ceph-mon[49602]: Deploying daemon mgr.b on vm04 2026-03-07T10:15:02.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:02 vm01 ceph-mon[49602]: pgmap v9: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-07T10:15:02.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:02 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:02.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:02 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:02.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:02 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:02.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:02 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:02.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:02 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:15:02.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:02 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:02.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:02 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:15:02.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:02 vm01 ceph-mon[49602]: from='mgr.14156 
192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:02.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:02 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.a", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-07T10:15:02.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:02 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-07T10:15:02.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:02 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:02.740 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:02 vm04 ceph-mon[49935]: from='client.14214 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mgr", "placement": "2;vm01=a;vm04=b", "target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:15:02.740 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:02 vm04 ceph-mon[49935]: Saving service mgr spec with placement vm01=a;vm04=b;count:2 2026-03-07T10:15:02.740 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:02 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:02.740 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:02 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:15:02.741 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:02 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:02.741 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:02 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:15:02.741 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:02 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:02.741 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:02 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.b", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-07T10:15:02.741 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:02 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd='[{"prefix": "auth get-or-create", "entity": "mgr.b", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]': finished 2026-03-07T10:15:02.741 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:02 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-07T10:15:02.741 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:02 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:02.741 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:02 vm04 ceph-mon[49935]: Deploying daemon mgr.b on vm04 2026-03-07T10:15:02.741 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:02 vm04 ceph-mon[49935]: pgmap v9: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-07T10:15:02.741 
INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:02 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:02.741 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:02 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:02.741 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:02 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:02.741 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:02 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:02.741 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:02 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:15:02.741 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:02 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:02.741 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:02 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:15:02.741 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:02 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:02.741 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:02 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.a", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-07T10:15:02.741 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:02 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-07T10:15:02.741 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:02 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:02.741 INFO:journalctl@ceph.mgr.b.vm04.stdout:Mar 07 10:15:02 vm04 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-b[51400]: 2026-03-07T10:15:02.592+0000 7fc663fe3100 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member 2026-03-07T10:15:02.744 INFO:teuthology.orchestra.run.vm04.stderr:1+0 records in 2026-03-07T10:15:02.744 INFO:teuthology.orchestra.run.vm04.stderr:1+0 records out 2026-03-07T10:15:02.744 INFO:teuthology.orchestra.run.vm04.stderr:512 bytes copied, 0.000150712 s, 3.4 MB/s 2026-03-07T10:15:02.745 DEBUG:teuthology.orchestra.run.vm04:> ! 
mount | grep -v devtmpfs | grep -q /dev/vdc 2026-03-07T10:15:02.823 DEBUG:teuthology.orchestra.run.vm04:> stat /dev/vdd 2026-03-07T10:15:02.890 INFO:teuthology.orchestra.run.vm04.stdout: File: /dev/vdd 2026-03-07T10:15:02.890 INFO:teuthology.orchestra.run.vm04.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-07T10:15:02.890 INFO:teuthology.orchestra.run.vm04.stdout:Device: 6h/6d Inode: 225 Links: 1 Device type: fc,30 2026-03-07T10:15:02.890 INFO:teuthology.orchestra.run.vm04.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-07T10:15:02.890 INFO:teuthology.orchestra.run.vm04.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-07T10:15:02.890 INFO:teuthology.orchestra.run.vm04.stdout:Access: 2026-03-07 10:14:48.502956678 +0000 2026-03-07T10:15:02.890 INFO:teuthology.orchestra.run.vm04.stdout:Modify: 2026-03-07 10:13:12.302758786 +0000 2026-03-07T10:15:02.890 INFO:teuthology.orchestra.run.vm04.stdout:Change: 2026-03-07 10:13:12.302758786 +0000 2026-03-07T10:15:02.890 INFO:teuthology.orchestra.run.vm04.stdout: Birth: 2026-03-07 10:05:44.266000000 +0000 2026-03-07T10:15:02.891 DEBUG:teuthology.orchestra.run.vm04:> sudo dd if=/dev/vdd of=/dev/null count=1 2026-03-07T10:15:02.932 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:02 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:02.971 INFO:teuthology.orchestra.run.vm04.stderr:1+0 records in 2026-03-07T10:15:02.971 INFO:teuthology.orchestra.run.vm04.stderr:1+0 records out 2026-03-07T10:15:02.971 INFO:teuthology.orchestra.run.vm04.stderr:512 bytes copied, 0.000125023 s, 4.1 MB/s 2026-03-07T10:15:02.972 DEBUG:teuthology.orchestra.run.vm04:> ! mount | grep -v devtmpfs | grep -q /dev/vdd 2026-03-07T10:15:03.038 DEBUG:teuthology.orchestra.run.vm04:> stat /dev/vde 2026-03-07T10:15:03.111 INFO:teuthology.orchestra.run.vm04.stdout: File: /dev/vde 2026-03-07T10:15:03.111 INFO:teuthology.orchestra.run.vm04.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-07T10:15:03.111 INFO:teuthology.orchestra.run.vm04.stdout:Device: 6h/6d Inode: 257 Links: 1 Device type: fc,40 2026-03-07T10:15:03.111 INFO:teuthology.orchestra.run.vm04.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-07T10:15:03.111 INFO:teuthology.orchestra.run.vm04.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-07T10:15:03.111 INFO:teuthology.orchestra.run.vm04.stdout:Access: 2026-03-07 10:14:48.506956680 +0000 2026-03-07T10:15:03.111 INFO:teuthology.orchestra.run.vm04.stdout:Modify: 2026-03-07 10:13:12.296758782 +0000 2026-03-07T10:15:03.111 INFO:teuthology.orchestra.run.vm04.stdout:Change: 2026-03-07 10:13:12.296758782 +0000 2026-03-07T10:15:03.111 INFO:teuthology.orchestra.run.vm04.stdout: Birth: 2026-03-07 10:05:44.281000000 +0000 2026-03-07T10:15:03.111 DEBUG:teuthology.orchestra.run.vm04:> sudo dd if=/dev/vde of=/dev/null count=1 2026-03-07T10:15:03.196 INFO:teuthology.orchestra.run.vm04.stderr:1+0 records in 2026-03-07T10:15:03.196 INFO:teuthology.orchestra.run.vm04.stderr:1+0 records out 2026-03-07T10:15:03.196 INFO:teuthology.orchestra.run.vm04.stderr:512 bytes copied, 0.000132958 s, 3.9 MB/s 2026-03-07T10:15:03.197 DEBUG:teuthology.orchestra.run.vm04:> ! 
mount | grep -v devtmpfs | grep -q /dev/vde 2026-03-07T10:15:03.225 DEBUG:teuthology.orchestra.run.vm07:> set -ex 2026-03-07T10:15:03.225 DEBUG:teuthology.orchestra.run.vm07:> dd if=/scratch_devs of=/dev/stdout 2026-03-07T10:15:03.240 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-07T10:15:03.240 DEBUG:teuthology.orchestra.run.vm07:> ls /dev/[sv]d? 2026-03-07T10:15:03.295 INFO:teuthology.orchestra.run.vm07.stdout:/dev/vda 2026-03-07T10:15:03.295 INFO:teuthology.orchestra.run.vm07.stdout:/dev/vdb 2026-03-07T10:15:03.295 INFO:teuthology.orchestra.run.vm07.stdout:/dev/vdc 2026-03-07T10:15:03.295 INFO:teuthology.orchestra.run.vm07.stdout:/dev/vdd 2026-03-07T10:15:03.295 INFO:teuthology.orchestra.run.vm07.stdout:/dev/vde 2026-03-07T10:15:03.295 WARNING:teuthology.misc:Removing root device: /dev/vda from device list 2026-03-07T10:15:03.295 DEBUG:teuthology.misc:devs=['/dev/vdb', '/dev/vdc', '/dev/vdd', '/dev/vde'] 2026-03-07T10:15:03.295 DEBUG:teuthology.orchestra.run.vm07:> stat /dev/vdb 2026-03-07T10:15:03.352 INFO:teuthology.orchestra.run.vm07.stdout: File: /dev/vdb 2026-03-07T10:15:03.352 INFO:teuthology.orchestra.run.vm07.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-07T10:15:03.352 INFO:teuthology.orchestra.run.vm07.stdout:Device: 6h/6d Inode: 221 Links: 1 Device type: fc,10 2026-03-07T10:15:03.352 INFO:teuthology.orchestra.run.vm07.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-07T10:15:03.352 INFO:teuthology.orchestra.run.vm07.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-07T10:15:03.352 INFO:teuthology.orchestra.run.vm07.stdout:Access: 2026-03-07 10:14:55.608728125 +0000 2026-03-07T10:15:03.352 INFO:teuthology.orchestra.run.vm07.stdout:Modify: 2026-03-07 10:13:11.673694579 +0000 2026-03-07T10:15:03.352 INFO:teuthology.orchestra.run.vm07.stdout:Change: 2026-03-07 10:13:11.673694579 +0000 2026-03-07T10:15:03.352 INFO:teuthology.orchestra.run.vm07.stdout: Birth: 2026-03-07 10:05:12.220000000 +0000 2026-03-07T10:15:03.352 DEBUG:teuthology.orchestra.run.vm07:> sudo dd if=/dev/vdb of=/dev/null count=1 2026-03-07T10:15:03.415 INFO:teuthology.orchestra.run.vm07.stderr:1+0 records in 2026-03-07T10:15:03.415 INFO:teuthology.orchestra.run.vm07.stderr:1+0 records out 2026-03-07T10:15:03.415 INFO:teuthology.orchestra.run.vm07.stderr:512 bytes copied, 0.000125154 s, 4.1 MB/s 2026-03-07T10:15:03.416 DEBUG:teuthology.orchestra.run.vm07:> ! 
mount | grep -v devtmpfs | grep -q /dev/vdb 2026-03-07T10:15:03.472 DEBUG:teuthology.orchestra.run.vm07:> stat /dev/vdc 2026-03-07T10:15:03.531 INFO:teuthology.orchestra.run.vm07.stdout: File: /dev/vdc 2026-03-07T10:15:03.531 INFO:teuthology.orchestra.run.vm07.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-07T10:15:03.531 INFO:teuthology.orchestra.run.vm07.stdout:Device: 6h/6d Inode: 222 Links: 1 Device type: fc,20 2026-03-07T10:15:03.531 INFO:teuthology.orchestra.run.vm07.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-07T10:15:03.531 INFO:teuthology.orchestra.run.vm07.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-07T10:15:03.531 INFO:teuthology.orchestra.run.vm07.stdout:Access: 2026-03-07 10:14:55.612728128 +0000 2026-03-07T10:15:03.531 INFO:teuthology.orchestra.run.vm07.stdout:Modify: 2026-03-07 10:13:11.691694593 +0000 2026-03-07T10:15:03.531 INFO:teuthology.orchestra.run.vm07.stdout:Change: 2026-03-07 10:13:11.691694593 +0000 2026-03-07T10:15:03.531 INFO:teuthology.orchestra.run.vm07.stdout: Birth: 2026-03-07 10:05:12.229000000 +0000 2026-03-07T10:15:03.532 DEBUG:teuthology.orchestra.run.vm07:> sudo dd if=/dev/vdc of=/dev/null count=1 2026-03-07T10:15:03.598 INFO:teuthology.orchestra.run.vm07.stderr:1+0 records in 2026-03-07T10:15:03.598 INFO:teuthology.orchestra.run.vm07.stderr:1+0 records out 2026-03-07T10:15:03.598 INFO:teuthology.orchestra.run.vm07.stderr:512 bytes copied, 0.000219842 s, 2.3 MB/s 2026-03-07T10:15:03.600 DEBUG:teuthology.orchestra.run.vm07:> ! mount | grep -v devtmpfs | grep -q /dev/vdc 2026-03-07T10:15:03.659 DEBUG:teuthology.orchestra.run.vm07:> stat /dev/vdd 2026-03-07T10:15:03.723 INFO:teuthology.orchestra.run.vm07.stdout: File: /dev/vdd 2026-03-07T10:15:03.723 INFO:teuthology.orchestra.run.vm07.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-07T10:15:03.723 INFO:teuthology.orchestra.run.vm07.stdout:Device: 6h/6d Inode: 256 Links: 1 Device type: fc,30 2026-03-07T10:15:03.723 INFO:teuthology.orchestra.run.vm07.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-07T10:15:03.723 INFO:teuthology.orchestra.run.vm07.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-07T10:15:03.723 INFO:teuthology.orchestra.run.vm07.stdout:Access: 2026-03-07 10:14:55.615728131 +0000 2026-03-07T10:15:03.723 INFO:teuthology.orchestra.run.vm07.stdout:Modify: 2026-03-07 10:13:11.675694580 +0000 2026-03-07T10:15:03.723 INFO:teuthology.orchestra.run.vm07.stdout:Change: 2026-03-07 10:13:11.675694580 +0000 2026-03-07T10:15:03.723 INFO:teuthology.orchestra.run.vm07.stdout: Birth: 2026-03-07 10:05:12.251000000 +0000 2026-03-07T10:15:03.723 DEBUG:teuthology.orchestra.run.vm07:> sudo dd if=/dev/vdd of=/dev/null count=1 2026-03-07T10:15:03.790 INFO:teuthology.orchestra.run.vm07.stderr:1+0 records in 2026-03-07T10:15:03.790 INFO:teuthology.orchestra.run.vm07.stderr:1+0 records out 2026-03-07T10:15:03.790 INFO:teuthology.orchestra.run.vm07.stderr:512 bytes copied, 0.00021365 s, 2.4 MB/s 2026-03-07T10:15:03.791 DEBUG:teuthology.orchestra.run.vm07:> ! 
mount | grep -v devtmpfs | grep -q /dev/vdd 2026-03-07T10:15:03.852 DEBUG:teuthology.orchestra.run.vm07:> stat /dev/vde 2026-03-07T10:15:03.910 INFO:teuthology.orchestra.run.vm07.stdout: File: /dev/vde 2026-03-07T10:15:03.910 INFO:teuthology.orchestra.run.vm07.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-07T10:15:03.911 INFO:teuthology.orchestra.run.vm07.stdout:Device: 6h/6d Inode: 257 Links: 1 Device type: fc,40 2026-03-07T10:15:03.911 INFO:teuthology.orchestra.run.vm07.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-07T10:15:03.911 INFO:teuthology.orchestra.run.vm07.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-07T10:15:03.911 INFO:teuthology.orchestra.run.vm07.stdout:Access: 2026-03-07 10:14:55.618728133 +0000 2026-03-07T10:15:03.911 INFO:teuthology.orchestra.run.vm07.stdout:Modify: 2026-03-07 10:13:11.689694591 +0000 2026-03-07T10:15:03.911 INFO:teuthology.orchestra.run.vm07.stdout:Change: 2026-03-07 10:13:11.689694591 +0000 2026-03-07T10:15:03.911 INFO:teuthology.orchestra.run.vm07.stdout: Birth: 2026-03-07 10:05:12.298000000 +0000 2026-03-07T10:15:03.911 DEBUG:teuthology.orchestra.run.vm07:> sudo dd if=/dev/vde of=/dev/null count=1 2026-03-07T10:15:03.959 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:03 vm04 ceph-mon[49935]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-07T10:15:03.959 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:03 vm04 ceph-mon[49935]: Reconfiguring mgr.a (unknown last config time)... 2026-03-07T10:15:03.959 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:03 vm04 ceph-mon[49935]: Reconfiguring daemon mgr.a on vm01 2026-03-07T10:15:03.959 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:03 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:03.959 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:03 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:03.959 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:03 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:15:03.959 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:03 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:03.959 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:03 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:15:03.959 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:03 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:03.959 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:03 vm04 ceph-mon[49935]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-07T10:15:03.959 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:03 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:03.974 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:03 vm01 ceph-mon[49602]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-07T10:15:03.974 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:03 vm01 ceph-mon[49602]: Reconfiguring mgr.a (unknown last config time)... 
2026-03-07T10:15:03.974 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:03 vm01 ceph-mon[49602]: Reconfiguring daemon mgr.a on vm01 2026-03-07T10:15:03.974 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:03 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:03.974 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:03 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:03.974 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:03 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:15:03.974 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:03 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:03.974 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:03 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:15:03.974 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:03 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:03.974 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:03 vm01 ceph-mon[49602]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-07T10:15:03.974 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:03 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:03.977 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:03 vm07 ceph-mon[68568]: Metadata not up to date on all hosts. Skipping non agent specs 2026-03-07T10:15:03.977 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:03 vm07 ceph-mon[68568]: Reconfiguring mgr.a (unknown last config time)... 2026-03-07T10:15:03.977 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:03 vm07 ceph-mon[68568]: Reconfiguring daemon mgr.a on vm01 2026-03-07T10:15:03.977 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:03 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:03.977 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:03 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:03.977 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:03 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:15:03.977 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:03 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:03.977 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:03 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:15:03.977 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:03 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:03.977 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:03 vm07 ceph-mon[68568]: Metadata not up to date on all hosts. 
Skipping non agent specs 2026-03-07T10:15:03.977 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:03 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:03.979 INFO:teuthology.orchestra.run.vm07.stderr:1+0 records in 2026-03-07T10:15:03.979 INFO:teuthology.orchestra.run.vm07.stderr:1+0 records out 2026-03-07T10:15:03.979 INFO:teuthology.orchestra.run.vm07.stderr:512 bytes copied, 0.000162655 s, 3.1 MB/s 2026-03-07T10:15:03.980 DEBUG:teuthology.orchestra.run.vm07:> ! mount | grep -v devtmpfs | grep -q /dev/vde 2026-03-07T10:15:04.039 INFO:tasks.cephadm:Deploying osd.0 on vm01 with /dev/vde... 2026-03-07T10:15:04.039 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3fd6e214-1a0e-11f1-b256-99cfc35f3328 -- lvm zap /dev/vde 2026-03-07T10:15:04.210 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/mon.a/config 2026-03-07T10:15:04.600 INFO:journalctl@ceph.mgr.b.vm04.stdout:Mar 07 10:15:04 vm04 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-b[51400]: 2026-03-07T10:15:04.281+0000 7fc663fe3100 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-07T10:15:05.185 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:04 vm01 ceph-mon[49602]: pgmap v10: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-07T10:15:05.185 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:04 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:05.185 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:04 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:05.185 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:04 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:15:05.185 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:04 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:05.185 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:04 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:15:05.185 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:04 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:05.185 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:04 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:05.185 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:04 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:05.185 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:04 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:05.185 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:04 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:05.303 INFO:teuthology.orchestra.run.vm01.stdout: 2026-03-07T10:15:05.322 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image 
harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3fd6e214-1a0e-11f1-b256-99cfc35f3328 -- ceph orch daemon add osd vm01:/dev/vde 2026-03-07T10:15:05.329 INFO:journalctl@ceph.mgr.b.vm04.stdout:Mar 07 10:15:05 vm04 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-b[51400]: 2026-03-07T10:15:05.193+0000 7fc663fe3100 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member 2026-03-07T10:15:05.329 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:04 vm04 ceph-mon[49935]: pgmap v10: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-07T10:15:05.329 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:04 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:05.329 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:04 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:05.329 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:04 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:15:05.329 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:04 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:05.329 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:04 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:15:05.329 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:04 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:05.329 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:04 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:05.329 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:04 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:05.329 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:04 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:05.329 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:04 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:05.432 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:04 vm07 ceph-mon[68568]: pgmap v10: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-07T10:15:05.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:04 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:05.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:04 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:05.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:04 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:15:05.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:04 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:05.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:04 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", 
"entity": "client.admin"}]: dispatch 2026-03-07T10:15:05.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:04 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:05.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:04 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:05.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:04 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:05.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:04 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:05.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:04 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:05.499 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/mon.a/config 2026-03-07T10:15:05.592 INFO:journalctl@ceph.mgr.b.vm04.stdout:Mar 07 10:15:05 vm04 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-b[51400]: 2026-03-07T10:15:05.328+0000 7fc663fe3100 -1 mgr[py] Module influx has missing NOTIFY_TYPES member 2026-03-07T10:15:05.850 INFO:journalctl@ceph.mgr.b.vm04.stdout:Mar 07 10:15:05 vm04 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-b[51400]: 2026-03-07T10:15:05.591+0000 7fc663fe3100 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member 2026-03-07T10:15:05.951 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:05 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-07T10:15:05.951 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:05 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-07T10:15:05.951 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:05 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:06.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:05 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-07T10:15:06.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:05 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-07T10:15:06.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:05 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:06.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:05 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-07T10:15:06.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:05 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-07T10:15:06.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:05 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config 
generate-minimal-conf"}]: dispatch 2026-03-07T10:15:06.978 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:06 vm01 ceph-mon[49602]: pgmap v11: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-07T10:15:06.978 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:06 vm01 ceph-mon[49602]: from='client.24104 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm01:/dev/vde", "target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:15:06.978 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:06 vm01 ceph-mon[49602]: from='client.? 192.168.123.101:0/4133620175' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "2672a018-9301-4f8e-b634-bac5c79f0203"}]: dispatch 2026-03-07T10:15:06.978 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:06 vm01 ceph-mon[49602]: from='client.? 192.168.123.101:0/4133620175' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "2672a018-9301-4f8e-b634-bac5c79f0203"}]': finished 2026-03-07T10:15:06.978 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:06 vm01 ceph-mon[49602]: osdmap e5: 1 total, 0 up, 1 in 2026-03-07T10:15:06.979 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:06 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-07T10:15:07.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:06 vm04 ceph-mon[49935]: pgmap v11: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-07T10:15:07.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:06 vm04 ceph-mon[49935]: from='client.24104 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm01:/dev/vde", "target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:15:07.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:06 vm04 ceph-mon[49935]: from='client.? 192.168.123.101:0/4133620175' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "2672a018-9301-4f8e-b634-bac5c79f0203"}]: dispatch 2026-03-07T10:15:07.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:06 vm04 ceph-mon[49935]: from='client.? 192.168.123.101:0/4133620175' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "2672a018-9301-4f8e-b634-bac5c79f0203"}]': finished 2026-03-07T10:15:07.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:06 vm04 ceph-mon[49935]: osdmap e5: 1 total, 0 up, 1 in 2026-03-07T10:15:07.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:06 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-07T10:15:07.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:06 vm07 ceph-mon[68568]: pgmap v11: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-07T10:15:07.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:06 vm07 ceph-mon[68568]: from='client.24104 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm01:/dev/vde", "target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:15:07.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:06 vm07 ceph-mon[68568]: from='client.? 192.168.123.101:0/4133620175' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "2672a018-9301-4f8e-b634-bac5c79f0203"}]: dispatch 2026-03-07T10:15:07.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:06 vm07 ceph-mon[68568]: from='client.? 
192.168.123.101:0/4133620175' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "2672a018-9301-4f8e-b634-bac5c79f0203"}]': finished 2026-03-07T10:15:07.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:06 vm07 ceph-mon[68568]: osdmap e5: 1 total, 0 up, 1 in 2026-03-07T10:15:07.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:06 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-07T10:15:07.850 INFO:journalctl@ceph.mgr.b.vm04.stdout:Mar 07 10:15:07 vm04 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-b[51400]: 2026-03-07T10:15:07.505+0000 7fc663fe3100 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-07T10:15:08.112 INFO:journalctl@ceph.mgr.b.vm04.stdout:Mar 07 10:15:07 vm04 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-b[51400]: 2026-03-07T10:15:07.850+0000 7fc663fe3100 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-07T10:15:08.112 INFO:journalctl@ceph.mgr.b.vm04.stdout:Mar 07 10:15:07 vm04 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-b[51400]: 2026-03-07T10:15:07.989+0000 7fc663fe3100 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 2026-03-07T10:15:08.112 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:07 vm04 ceph-mon[49935]: from='client.? 192.168.123.101:0/4113628148' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-07T10:15:08.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:07 vm01 ceph-mon[49602]: from='client.? 192.168.123.101:0/4113628148' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-07T10:15:08.379 INFO:journalctl@ceph.mgr.b.vm04.stdout:Mar 07 10:15:08 vm04 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-b[51400]: 2026-03-07T10:15:08.111+0000 7fc663fe3100 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-07T10:15:08.379 INFO:journalctl@ceph.mgr.b.vm04.stdout:Mar 07 10:15:08 vm04 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-b[51400]: 2026-03-07T10:15:08.253+0000 7fc663fe3100 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 2026-03-07T10:15:08.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:07 vm07 ceph-mon[68568]: from='client.? 
192.168.123.101:0/4113628148' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-07T10:15:08.850 INFO:journalctl@ceph.mgr.b.vm04.stdout:Mar 07 10:15:08 vm04 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-b[51400]: 2026-03-07T10:15:08.378+0000 7fc663fe3100 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-07T10:15:09.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:08 vm01 ceph-mon[49602]: pgmap v13: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-07T10:15:09.350 INFO:journalctl@ceph.mgr.b.vm04.stdout:Mar 07 10:15:08 vm04 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-b[51400]: 2026-03-07T10:15:08.922+0000 7fc663fe3100 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member 2026-03-07T10:15:09.350 INFO:journalctl@ceph.mgr.b.vm04.stdout:Mar 07 10:15:09 vm04 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-b[51400]: 2026-03-07T10:15:09.078+0000 7fc663fe3100 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-07T10:15:09.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:08 vm04 ceph-mon[49935]: pgmap v13: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-07T10:15:09.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:08 vm07 ceph-mon[68568]: pgmap v13: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-07T10:15:10.100 INFO:journalctl@ceph.mgr.b.vm04.stdout:Mar 07 10:15:09 vm04 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-b[51400]: 2026-03-07T10:15:09.819+0000 7fc663fe3100 -1 mgr[py] Module rgw has missing NOTIFY_TYPES member 2026-03-07T10:15:10.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:10 vm01 ceph-mon[49602]: pgmap v14: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-07T10:15:11.179 INFO:journalctl@ceph.mgr.b.vm04.stdout:Mar 07 10:15:10 vm04 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-b[51400]: 2026-03-07T10:15:10.916+0000 7fc663fe3100 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-07T10:15:11.179 INFO:journalctl@ceph.mgr.b.vm04.stdout:Mar 07 10:15:11 vm04 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-b[51400]: 2026-03-07T10:15:11.046+0000 7fc663fe3100 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member 2026-03-07T10:15:11.179 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:10 vm04 ceph-mon[49935]: pgmap v14: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-07T10:15:11.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:10 vm07 ceph-mon[68568]: pgmap v14: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-07T10:15:11.460 INFO:journalctl@ceph.mgr.b.vm04.stdout:Mar 07 10:15:11 vm04 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-b[51400]: 2026-03-07T10:15:11.178+0000 7fc663fe3100 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-07T10:15:11.850 INFO:journalctl@ceph.mgr.b.vm04.stdout:Mar 07 10:15:11 vm04 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-b[51400]: 2026-03-07T10:15:11.459+0000 7fc663fe3100 -1 mgr[py] Module status has missing NOTIFY_TYPES member 2026-03-07T10:15:11.850 INFO:journalctl@ceph.mgr.b.vm04.stdout:Mar 07 10:15:11 vm04 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-b[51400]: 2026-03-07T10:15:11.592+0000 7fc663fe3100 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 2026-03-07T10:15:12.283 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:11 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch 2026-03-07T10:15:12.284 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:11 vm01 ceph-mon[49602]: from='mgr.14156 
192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:12.288 INFO:journalctl@ceph.mgr.b.vm04.stdout:Mar 07 10:15:11 vm04 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-b[51400]: 2026-03-07T10:15:11.922+0000 7fc663fe3100 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member 2026-03-07T10:15:12.288 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:11 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch 2026-03-07T10:15:12.288 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:11 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:12.432 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:11 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch 2026-03-07T10:15:12.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:11 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:12.600 INFO:journalctl@ceph.mgr.b.vm04.stdout:Mar 07 10:15:12 vm04 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-b[51400]: 2026-03-07T10:15:12.287+0000 7fc663fe3100 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member 2026-03-07T10:15:12.984 INFO:journalctl@ceph.mgr.b.vm04.stdout:Mar 07 10:15:12 vm04 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-b[51400]: 2026-03-07T10:15:12.677+0000 7fc663fe3100 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-07T10:15:12.984 INFO:journalctl@ceph.mgr.b.vm04.stdout:Mar 07 10:15:12 vm04 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-b[51400]: 2026-03-07T10:15:12.808+0000 7fc663fe3100 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-07T10:15:13.110 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:12 vm01 ceph-mon[49602]: Deploying daemon osd.0 on vm01 2026-03-07T10:15:13.110 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:12 vm01 ceph-mon[49602]: pgmap v15: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-07T10:15:13.110 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:12 vm01 ceph-mon[49602]: Standby manager daemon b started 2026-03-07T10:15:13.110 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:12 vm01 ceph-mon[49602]: from='mgr.? 192.168.123.104:0/559032863' entity='mgr.b' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/b/crt"}]: dispatch 2026-03-07T10:15:13.110 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:12 vm01 ceph-mon[49602]: from='mgr.? 192.168.123.104:0/559032863' entity='mgr.b' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-07T10:15:13.110 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:12 vm01 ceph-mon[49602]: from='mgr.? 192.168.123.104:0/559032863' entity='mgr.b' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/b/key"}]: dispatch 2026-03-07T10:15:13.110 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:12 vm01 ceph-mon[49602]: from='mgr.? 
192.168.123.104:0/559032863' entity='mgr.b' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-07T10:15:13.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:12 vm04 ceph-mon[49935]: Deploying daemon osd.0 on vm01 2026-03-07T10:15:13.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:12 vm04 ceph-mon[49935]: pgmap v15: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-07T10:15:13.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:12 vm04 ceph-mon[49935]: Standby manager daemon b started 2026-03-07T10:15:13.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:12 vm04 ceph-mon[49935]: from='mgr.? 192.168.123.104:0/559032863' entity='mgr.b' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/b/crt"}]: dispatch 2026-03-07T10:15:13.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:12 vm04 ceph-mon[49935]: from='mgr.? 192.168.123.104:0/559032863' entity='mgr.b' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-07T10:15:13.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:12 vm04 ceph-mon[49935]: from='mgr.? 192.168.123.104:0/559032863' entity='mgr.b' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/b/key"}]: dispatch 2026-03-07T10:15:13.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:12 vm04 ceph-mon[49935]: from='mgr.? 192.168.123.104:0/559032863' entity='mgr.b' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-07T10:15:13.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:12 vm07 ceph-mon[68568]: Deploying daemon osd.0 on vm01 2026-03-07T10:15:13.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:12 vm07 ceph-mon[68568]: pgmap v15: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-07T10:15:13.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:12 vm07 ceph-mon[68568]: Standby manager daemon b started 2026-03-07T10:15:13.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:12 vm07 ceph-mon[68568]: from='mgr.? 192.168.123.104:0/559032863' entity='mgr.b' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/b/crt"}]: dispatch 2026-03-07T10:15:13.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:12 vm07 ceph-mon[68568]: from='mgr.? 192.168.123.104:0/559032863' entity='mgr.b' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-07T10:15:13.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:12 vm07 ceph-mon[68568]: from='mgr.? 192.168.123.104:0/559032863' entity='mgr.b' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/b/key"}]: dispatch 2026-03-07T10:15:13.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:12 vm07 ceph-mon[68568]: from='mgr.? 
192.168.123.104:0/559032863' entity='mgr.b' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-07T10:15:14.264 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:13 vm01 ceph-mon[49602]: mgrmap e14: a(active, since 41s), standbys: b 2026-03-07T10:15:14.264 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:13 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mgr metadata", "who": "b", "id": "b"}]: dispatch 2026-03-07T10:15:14.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:13 vm04 ceph-mon[49935]: mgrmap e14: a(active, since 41s), standbys: b 2026-03-07T10:15:14.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:13 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mgr metadata", "who": "b", "id": "b"}]: dispatch 2026-03-07T10:15:14.432 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:13 vm07 ceph-mon[68568]: mgrmap e14: a(active, since 41s), standbys: b 2026-03-07T10:15:14.432 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:13 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mgr metadata", "who": "b", "id": "b"}]: dispatch 2026-03-07T10:15:15.095 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:15 vm01 ceph-mon[49602]: pgmap v16: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-07T10:15:15.095 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:15 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:15:15.095 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:15 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:15.095 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:15 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:15.095 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:15 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:15:15.095 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:15 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:15.095 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:15 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:15.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:15 vm04 ceph-mon[49935]: pgmap v16: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-07T10:15:15.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:15 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:15:15.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:15 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:15.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:15 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:15.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:15 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 
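Each data device is handed to the cluster in two steps, both driven through the cephadm binary with this run's container image, config, keyring and fsid: ceph-volume lvm zap wipes any previous LVM/OSD state, then ceph orch daemon add osd creates the OSD on that host and device. A sketch of the pattern, using the image and fsid from this run as example values (in the log the binary is invoked from /home/ubuntu/cephtest/cephadm; a cephadm on PATH is assumed here):

    IMAGE=harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5
    FSID=3fd6e214-1a0e-11f1-b256-99cfc35f3328
    HOST=vm01
    DEV=/dev/vde

    # wipe any previous LVM/OSD metadata on the device
    sudo cephadm --image "$IMAGE" ceph-volume \
        -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring \
        --fsid "$FSID" -- lvm zap "$DEV"

    # ask the orchestrator to create an OSD on that host/device
    sudo cephadm --image "$IMAGE" shell \
        -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring \
        --fsid "$FSID" -- ceph orch daemon add osd "$HOST:$DEV"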
2026-03-07T10:15:15.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:15 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:15.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:15 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:15.434 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:15 vm07 ceph-mon[68568]: pgmap v16: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-07T10:15:15.434 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:15 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:15:15.434 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:15 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:15.434 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:15 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:15.434 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:15 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:15:15.434 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:15 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:15.434 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:15 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:15.863 INFO:teuthology.orchestra.run.vm01.stdout:Created osd(s) 0 on host 'vm01' 2026-03-07T10:15:15.952 DEBUG:teuthology.orchestra.run.vm01:osd.0> sudo journalctl -f -n 0 -u ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328@osd.0.service 2026-03-07T10:15:15.953 INFO:tasks.cephadm:Deploying osd.1 on vm01 with /dev/vdd... 
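Once the orchestrator reports "Created osd(s) 0 on host 'vm01'", the run starts tailing the new daemon's systemd unit and moves on to the next device. A sketch of following a cephadm-managed OSD unit and checking that the daemon registered, assuming the ceph CLI and the admin keyring are available on the node as they are in this run; the fsid is the one from this log:

    FSID=3fd6e214-1a0e-11f1-b256-99cfc35f3328
    # stream the new daemon's log, starting from now (-n 0)
    sudo journalctl -f -n 0 -u "ceph-${FSID}@osd.0.service" &
    # osd.0 should appear under host vm01 once it has booted
    sudo ceph osd tree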
2026-03-07T10:15:15.953 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3fd6e214-1a0e-11f1-b256-99cfc35f3328 -- lvm zap /dev/vdd 2026-03-07T10:15:16.089 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:16 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:16.089 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:16 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:16.089 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:16 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:16.089 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:16 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:15:16.089 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:16 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config rm", "who": "osd/host:vm01", "name": "osd_memory_target"}]: dispatch 2026-03-07T10:15:16.089 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:16 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:16.089 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:16 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:16.089 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:16 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:15:16.089 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:16 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:16.089 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:16 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:16.166 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/mon.a/config 2026-03-07T10:15:16.183 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:16 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:16.183 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:16 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:16.183 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:16 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:16.183 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:16 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:15:16.183 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:16 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config rm", "who": "osd/host:vm01", "name": "osd_memory_target"}]: dispatch 2026-03-07T10:15:16.183 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:16 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:16.183 
INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:16 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:16.183 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:16 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:15:16.183 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:16 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:16.183 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:16 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:16.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:16 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:16.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:16 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:16.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:16 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:16.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:16 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:15:16.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:16 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config rm", "who": "osd/host:vm01", "name": "osd_memory_target"}]: dispatch 2026-03-07T10:15:16.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:16 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:16.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:16 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:16.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:16 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:15:16.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:16 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:16.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:16 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:16.374 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 07 10:15:16 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-osd-0[58428]: 2026-03-07T10:15:16.214+0000 7f7f73bb2740 -1 osd.0 0 log_to_monitors true 2026-03-07T10:15:17.250 INFO:teuthology.orchestra.run.vm01.stdout: 2026-03-07T10:15:17.271 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3fd6e214-1a0e-11f1-b256-99cfc35f3328 -- ceph orch daemon add osd vm01:/dev/vdd 2026-03-07T10:15:17.355 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:17 vm01 ceph-mon[49602]: pgmap v17: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-07T10:15:17.356 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:17 vm01 ceph-mon[49602]: from='osd.0 
[v2:192.168.123.101:6802/4213759478,v1:192.168.123.101:6803/4213759478]' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch 2026-03-07T10:15:17.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:17 vm07 ceph-mon[68568]: pgmap v17: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-07T10:15:17.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:17 vm07 ceph-mon[68568]: from='osd.0 [v2:192.168.123.101:6802/4213759478,v1:192.168.123.101:6803/4213759478]' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch 2026-03-07T10:15:17.471 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/mon.a/config 2026-03-07T10:15:17.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:17 vm04 ceph-mon[49935]: pgmap v17: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-07T10:15:17.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:17 vm04 ceph-mon[49935]: from='osd.0 [v2:192.168.123.101:6802/4213759478,v1:192.168.123.101:6803/4213759478]' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch 2026-03-07T10:15:18.354 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:18 vm01 ceph-mon[49602]: from='osd.0 [v2:192.168.123.101:6802/4213759478,v1:192.168.123.101:6803/4213759478]' entity='osd.0' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]': finished 2026-03-07T10:15:18.354 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:18 vm01 ceph-mon[49602]: osdmap e6: 1 total, 0 up, 1 in 2026-03-07T10:15:18.354 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:18 vm01 ceph-mon[49602]: from='osd.0 [v2:192.168.123.101:6802/4213759478,v1:192.168.123.101:6803/4213759478]' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm01", "root=default"]}]: dispatch 2026-03-07T10:15:18.354 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:18 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-07T10:15:18.354 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:18 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:18.354 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:18 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:18.354 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:18 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:18.354 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:18 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-07T10:15:18.354 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:18 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-07T10:15:18.354 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:18 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:18.354 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:18 vm01 ceph-mon[49602]: from='osd.0 [v2:192.168.123.101:6802/4213759478,v1:192.168.123.101:6803/4213759478]' 
entity='osd.0' cmd='[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm01", "root=default"]}]': finished 2026-03-07T10:15:18.354 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:18 vm01 ceph-mon[49602]: osdmap e7: 1 total, 0 up, 1 in 2026-03-07T10:15:18.354 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:18 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-07T10:15:18.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:18 vm07 ceph-mon[68568]: from='osd.0 [v2:192.168.123.101:6802/4213759478,v1:192.168.123.101:6803/4213759478]' entity='osd.0' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]': finished 2026-03-07T10:15:18.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:18 vm07 ceph-mon[68568]: osdmap e6: 1 total, 0 up, 1 in 2026-03-07T10:15:18.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:18 vm07 ceph-mon[68568]: from='osd.0 [v2:192.168.123.101:6802/4213759478,v1:192.168.123.101:6803/4213759478]' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm01", "root=default"]}]: dispatch 2026-03-07T10:15:18.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:18 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-07T10:15:18.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:18 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:18.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:18 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:18.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:18 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:18.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:18 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-07T10:15:18.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:18 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-07T10:15:18.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:18 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:18.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:18 vm07 ceph-mon[68568]: from='osd.0 [v2:192.168.123.101:6802/4213759478,v1:192.168.123.101:6803/4213759478]' entity='osd.0' cmd='[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm01", "root=default"]}]': finished 2026-03-07T10:15:18.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:18 vm07 ceph-mon[68568]: osdmap e7: 1 total, 0 up, 1 in 2026-03-07T10:15:18.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:18 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-07T10:15:18.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:18 vm04 ceph-mon[49935]: from='osd.0 [v2:192.168.123.101:6802/4213759478,v1:192.168.123.101:6803/4213759478]' entity='osd.0' cmd='[{"prefix": "osd crush set-device-class", "class": 
"hdd", "ids": ["0"]}]': finished 2026-03-07T10:15:18.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:18 vm04 ceph-mon[49935]: osdmap e6: 1 total, 0 up, 1 in 2026-03-07T10:15:18.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:18 vm04 ceph-mon[49935]: from='osd.0 [v2:192.168.123.101:6802/4213759478,v1:192.168.123.101:6803/4213759478]' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm01", "root=default"]}]: dispatch 2026-03-07T10:15:18.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:18 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-07T10:15:18.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:18 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:18.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:18 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:18.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:18 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:18.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:18 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-07T10:15:18.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:18 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-07T10:15:18.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:18 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:18.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:18 vm04 ceph-mon[49935]: from='osd.0 [v2:192.168.123.101:6802/4213759478,v1:192.168.123.101:6803/4213759478]' entity='osd.0' cmd='[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm01", "root=default"]}]': finished 2026-03-07T10:15:18.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:18 vm04 ceph-mon[49935]: osdmap e7: 1 total, 0 up, 1 in 2026-03-07T10:15:18.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:18 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-07T10:15:19.224 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 07 10:15:19 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-osd-0[58428]: 2026-03-07T10:15:19.081+0000 7f7f6fb33640 -1 osd.0 0 waiting for initial osdmap 2026-03-07T10:15:19.224 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 07 10:15:19 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-osd-0[58428]: 2026-03-07T10:15:19.087+0000 7f7f6b15c640 -1 osd.0 7 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-07T10:15:19.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:19 vm01 ceph-mon[49602]: pgmap v19: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-07T10:15:19.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:19 vm01 ceph-mon[49602]: from='client.14250 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm01:/dev/vdd", "target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:15:19.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:19 
vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-07T10:15:19.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:19 vm01 ceph-mon[49602]: from='osd.0 [v2:192.168.123.101:6802/4213759478,v1:192.168.123.101:6803/4213759478]' entity='osd.0' 2026-03-07T10:15:19.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:19 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-07T10:15:19.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:19 vm07 ceph-mon[68568]: pgmap v19: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-07T10:15:19.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:19 vm07 ceph-mon[68568]: from='client.14250 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm01:/dev/vdd", "target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:15:19.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:19 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-07T10:15:19.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:19 vm07 ceph-mon[68568]: from='osd.0 [v2:192.168.123.101:6802/4213759478,v1:192.168.123.101:6803/4213759478]' entity='osd.0' 2026-03-07T10:15:19.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:19 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-07T10:15:19.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:19 vm04 ceph-mon[49935]: pgmap v19: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-07T10:15:19.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:19 vm04 ceph-mon[49935]: from='client.14250 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm01:/dev/vdd", "target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:15:19.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:19 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-07T10:15:19.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:19 vm04 ceph-mon[49935]: from='osd.0 [v2:192.168.123.101:6802/4213759478,v1:192.168.123.101:6803/4213759478]' entity='osd.0' 2026-03-07T10:15:19.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:19 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-07T10:15:20.432 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:20 vm07 ceph-mon[68568]: purged_snaps scrub starts 2026-03-07T10:15:20.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:20 vm07 ceph-mon[68568]: purged_snaps scrub ok 2026-03-07T10:15:20.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:20 vm07 ceph-mon[68568]: from='client.? 192.168.123.101:0/808473893' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "930d5f0c-1a5a-4d6c-b82c-8410fdb0227e"}]: dispatch 2026-03-07T10:15:20.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:20 vm07 ceph-mon[68568]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "930d5f0c-1a5a-4d6c-b82c-8410fdb0227e"}]: dispatch 2026-03-07T10:15:20.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:20 vm07 ceph-mon[68568]: from='client.? 
' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "930d5f0c-1a5a-4d6c-b82c-8410fdb0227e"}]': finished 2026-03-07T10:15:20.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:20 vm07 ceph-mon[68568]: osd.0 [v2:192.168.123.101:6802/4213759478,v1:192.168.123.101:6803/4213759478] boot 2026-03-07T10:15:20.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:20 vm07 ceph-mon[68568]: osdmap e8: 2 total, 1 up, 2 in 2026-03-07T10:15:20.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:20 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-07T10:15:20.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:20 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-07T10:15:20.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:20 vm07 ceph-mon[68568]: from='client.? 192.168.123.101:0/3949637207' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-07T10:15:20.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:20 vm01 ceph-mon[49602]: purged_snaps scrub starts 2026-03-07T10:15:20.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:20 vm01 ceph-mon[49602]: purged_snaps scrub ok 2026-03-07T10:15:20.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:20 vm01 ceph-mon[49602]: from='client.? 192.168.123.101:0/808473893' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "930d5f0c-1a5a-4d6c-b82c-8410fdb0227e"}]: dispatch 2026-03-07T10:15:20.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:20 vm01 ceph-mon[49602]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "930d5f0c-1a5a-4d6c-b82c-8410fdb0227e"}]: dispatch 2026-03-07T10:15:20.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:20 vm01 ceph-mon[49602]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "930d5f0c-1a5a-4d6c-b82c-8410fdb0227e"}]': finished 2026-03-07T10:15:20.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:20 vm01 ceph-mon[49602]: osd.0 [v2:192.168.123.101:6802/4213759478,v1:192.168.123.101:6803/4213759478] boot 2026-03-07T10:15:20.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:20 vm01 ceph-mon[49602]: osdmap e8: 2 total, 1 up, 2 in 2026-03-07T10:15:20.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:20 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-07T10:15:20.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:20 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-07T10:15:20.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:20 vm01 ceph-mon[49602]: from='client.? 192.168.123.101:0/3949637207' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-07T10:15:20.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:20 vm04 ceph-mon[49935]: purged_snaps scrub starts 2026-03-07T10:15:20.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:20 vm04 ceph-mon[49935]: purged_snaps scrub ok 2026-03-07T10:15:20.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:20 vm04 ceph-mon[49935]: from='client.? 
192.168.123.101:0/808473893' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "930d5f0c-1a5a-4d6c-b82c-8410fdb0227e"}]: dispatch 2026-03-07T10:15:20.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:20 vm04 ceph-mon[49935]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "930d5f0c-1a5a-4d6c-b82c-8410fdb0227e"}]: dispatch 2026-03-07T10:15:20.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:20 vm04 ceph-mon[49935]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "930d5f0c-1a5a-4d6c-b82c-8410fdb0227e"}]': finished 2026-03-07T10:15:20.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:20 vm04 ceph-mon[49935]: osd.0 [v2:192.168.123.101:6802/4213759478,v1:192.168.123.101:6803/4213759478] boot 2026-03-07T10:15:20.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:20 vm04 ceph-mon[49935]: osdmap e8: 2 total, 1 up, 2 in 2026-03-07T10:15:20.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:20 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-07T10:15:20.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:20 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-07T10:15:20.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:20 vm04 ceph-mon[49935]: from='client.? 192.168.123.101:0/3949637207' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-07T10:15:21.432 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:21 vm07 ceph-mon[68568]: pgmap v22: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-07T10:15:21.432 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:21 vm07 ceph-mon[68568]: osdmap e9: 2 total, 1 up, 2 in 2026-03-07T10:15:21.432 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:21 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-07T10:15:21.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:21 vm01 ceph-mon[49602]: pgmap v22: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-07T10:15:21.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:21 vm01 ceph-mon[49602]: osdmap e9: 2 total, 1 up, 2 in 2026-03-07T10:15:21.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:21 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-07T10:15:21.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:21 vm04 ceph-mon[49935]: pgmap v22: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-07T10:15:21.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:21 vm04 ceph-mon[49935]: osdmap e9: 2 total, 1 up, 2 in 2026-03-07T10:15:21.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:21 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-07T10:15:23.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:23 vm01 ceph-mon[49602]: pgmap v24: 0 pgs: ; 0 B data, 26 MiB used, 20 GiB / 20 GiB avail 2026-03-07T10:15:23.395 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:23 vm04 ceph-mon[49935]: pgmap v24: 0 pgs: ; 0 B data, 26 MiB used, 20 GiB / 20 GiB avail 2026-03-07T10:15:23.432 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:23 vm07 ceph-mon[68568]: pgmap v24: 0 pgs: ; 0 B data, 26 MiB 
used, 20 GiB / 20 GiB avail 2026-03-07T10:15:24.402 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:24 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch 2026-03-07T10:15:24.402 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:24 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:24.402 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:24 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:24.402 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:24 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:24.403 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:24 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:24.432 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:24 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch 2026-03-07T10:15:24.432 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:24 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:24.432 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:24 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:24.432 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:24 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:24.432 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:24 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:24.440 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:24 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch 2026-03-07T10:15:24.440 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:24 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:24.440 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:24 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:24.440 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:24 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:24.440 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:24 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:25.398 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:25 vm01 ceph-mon[49602]: Deploying daemon osd.1 on vm01 2026-03-07T10:15:25.398 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:25 vm01 ceph-mon[49602]: pgmap v25: 0 pgs: ; 0 B data, 26 MiB used, 20 GiB / 20 GiB avail 2026-03-07T10:15:25.432 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:25 vm07 ceph-mon[68568]: Deploying daemon osd.1 on vm01 2026-03-07T10:15:25.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:25 vm07 ceph-mon[68568]: pgmap v25: 0 pgs: ; 0 B data, 26 MiB used, 20 GiB / 20 GiB avail 2026-03-07T10:15:25.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:25 vm04 ceph-mon[49935]: Deploying daemon osd.1 on vm01 
2026-03-07T10:15:25.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:25 vm04 ceph-mon[49935]: pgmap v25: 0 pgs: ; 0 B data, 26 MiB used, 20 GiB / 20 GiB avail 2026-03-07T10:15:27.162 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:27 vm01 ceph-mon[49602]: pgmap v26: 0 pgs: ; 0 B data, 26 MiB used, 20 GiB / 20 GiB avail 2026-03-07T10:15:27.162 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:27 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:15:27.162 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:27 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:27.162 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:27 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:15:27.162 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:27 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:27.162 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:27 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:27.162 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:27 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:27.432 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:27 vm07 ceph-mon[68568]: pgmap v26: 0 pgs: ; 0 B data, 26 MiB used, 20 GiB / 20 GiB avail 2026-03-07T10:15:27.432 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:27 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:15:27.432 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:27 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:27.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:27 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:15:27.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:27 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:27.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:27 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:27.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:27 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:27.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:27 vm04 ceph-mon[49935]: pgmap v26: 0 pgs: ; 0 B data, 26 MiB used, 20 GiB / 20 GiB avail 2026-03-07T10:15:27.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:27 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:15:27.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:27 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:27.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:27 vm04 ceph-mon[49935]: from='mgr.14156 
192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:15:27.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:27 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:27.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:27 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:27.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:27 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:28.177 INFO:teuthology.orchestra.run.vm01.stdout:Created osd(s) 1 on host 'vm01' 2026-03-07T10:15:28.237 DEBUG:teuthology.orchestra.run.vm01:osd.1> sudo journalctl -f -n 0 -u ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328@osd.1.service 2026-03-07T10:15:28.239 INFO:tasks.cephadm:Deploying osd.2 on vm04 with /dev/vde... 2026-03-07T10:15:28.239 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3fd6e214-1a0e-11f1-b256-99cfc35f3328 -- lvm zap /dev/vde 2026-03-07T10:15:28.410 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/mon.b/config 2026-03-07T10:15:28.432 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:28 vm07 ceph-mon[68568]: from='osd.1 [v2:192.168.123.101:6810/2110476213,v1:192.168.123.101:6811/2110476213]' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch 2026-03-07T10:15:28.437 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:28 vm04 ceph-mon[49935]: from='osd.1 [v2:192.168.123.101:6810/2110476213,v1:192.168.123.101:6811/2110476213]' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch 2026-03-07T10:15:28.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:28 vm01 ceph-mon[49602]: from='osd.1 [v2:192.168.123.101:6810/2110476213,v1:192.168.123.101:6811/2110476213]' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch 2026-03-07T10:15:29.432 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:29 vm07 ceph-mon[68568]: pgmap v27: 0 pgs: ; 0 B data, 26 MiB used, 20 GiB / 20 GiB avail 2026-03-07T10:15:29.432 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:29 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:15:29.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:29 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config rm", "who": "osd/host:vm01", "name": "osd_memory_target"}]: dispatch 2026-03-07T10:15:29.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:29 vm07 ceph-mon[68568]: from='osd.1 [v2:192.168.123.101:6810/2110476213,v1:192.168.123.101:6811/2110476213]' entity='osd.1' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]': finished 2026-03-07T10:15:29.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:29 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:29.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:29 vm07 ceph-mon[68568]: osdmap e10: 2 total, 1 up, 2 in 2026-03-07T10:15:29.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 
07 10:15:29 vm07 ceph-mon[68568]: from='osd.1 [v2:192.168.123.101:6810/2110476213,v1:192.168.123.101:6811/2110476213]' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm01", "root=default"]}]: dispatch 2026-03-07T10:15:29.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:29 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-07T10:15:29.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:29 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:29.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:29 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:15:29.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:29 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:29.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:29 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:29.435 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:29 vm04 ceph-mon[49935]: pgmap v27: 0 pgs: ; 0 B data, 26 MiB used, 20 GiB / 20 GiB avail 2026-03-07T10:15:29.435 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:29 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:15:29.435 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:29 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config rm", "who": "osd/host:vm01", "name": "osd_memory_target"}]: dispatch 2026-03-07T10:15:29.435 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:29 vm04 ceph-mon[49935]: from='osd.1 [v2:192.168.123.101:6810/2110476213,v1:192.168.123.101:6811/2110476213]' entity='osd.1' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]': finished 2026-03-07T10:15:29.435 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:29 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:29.435 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:29 vm04 ceph-mon[49935]: osdmap e10: 2 total, 1 up, 2 in 2026-03-07T10:15:29.435 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:29 vm04 ceph-mon[49935]: from='osd.1 [v2:192.168.123.101:6810/2110476213,v1:192.168.123.101:6811/2110476213]' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm01", "root=default"]}]: dispatch 2026-03-07T10:15:29.436 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:29 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-07T10:15:29.436 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:29 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:29.436 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:29 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:15:29.436 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:29 vm04 ceph-mon[49935]: 
from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:29.436 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:29 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:29.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:29 vm01 ceph-mon[49602]: pgmap v27: 0 pgs: ; 0 B data, 26 MiB used, 20 GiB / 20 GiB avail 2026-03-07T10:15:29.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:29 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:15:29.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:29 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config rm", "who": "osd/host:vm01", "name": "osd_memory_target"}]: dispatch 2026-03-07T10:15:29.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:29 vm01 ceph-mon[49602]: from='osd.1 [v2:192.168.123.101:6810/2110476213,v1:192.168.123.101:6811/2110476213]' entity='osd.1' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]': finished 2026-03-07T10:15:29.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:29 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:29.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:29 vm01 ceph-mon[49602]: osdmap e10: 2 total, 1 up, 2 in 2026-03-07T10:15:29.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:29 vm01 ceph-mon[49602]: from='osd.1 [v2:192.168.123.101:6810/2110476213,v1:192.168.123.101:6811/2110476213]' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm01", "root=default"]}]: dispatch 2026-03-07T10:15:29.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:29 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-07T10:15:29.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:29 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:29.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:29 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:15:29.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:29 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:29.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:29 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:29.475 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 07 10:15:29 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-osd-1[61656]: 2026-03-07T10:15:29.204+0000 7f60376b4640 -1 osd.1 0 waiting for initial osdmap 2026-03-07T10:15:29.475 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 07 10:15:29 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-osd-1[61656]: 2026-03-07T10:15:29.210+0000 7f6032ccb640 -1 osd.1 11 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-07T10:15:29.556 INFO:teuthology.orchestra.run.vm04.stdout: 2026-03-07T10:15:29.576 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k 
/etc/ceph/ceph.client.admin.keyring --fsid 3fd6e214-1a0e-11f1-b256-99cfc35f3328 -- ceph orch daemon add osd vm04:/dev/vde 2026-03-07T10:15:29.747 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/mon.b/config 2026-03-07T10:15:30.459 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:30 vm04 ceph-mon[49935]: from='osd.1 [v2:192.168.123.101:6810/2110476213,v1:192.168.123.101:6811/2110476213]' entity='osd.1' cmd='[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm01", "root=default"]}]': finished 2026-03-07T10:15:30.459 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:30 vm04 ceph-mon[49935]: osdmap e11: 2 total, 1 up, 2 in 2026-03-07T10:15:30.459 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:30 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-07T10:15:30.459 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:30 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-07T10:15:30.459 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:30 vm04 ceph-mon[49935]: pgmap v30: 0 pgs: ; 0 B data, 26 MiB used, 20 GiB / 20 GiB avail 2026-03-07T10:15:30.459 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:30 vm04 ceph-mon[49935]: from='client.24151 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm04:/dev/vde", "target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:15:30.459 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:30 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-07T10:15:30.459 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:30 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-07T10:15:30.459 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:30 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:30.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:30 vm01 ceph-mon[49602]: from='osd.1 [v2:192.168.123.101:6810/2110476213,v1:192.168.123.101:6811/2110476213]' entity='osd.1' cmd='[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm01", "root=default"]}]': finished 2026-03-07T10:15:30.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:30 vm01 ceph-mon[49602]: osdmap e11: 2 total, 1 up, 2 in 2026-03-07T10:15:30.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:30 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-07T10:15:30.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:30 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-07T10:15:30.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:30 vm01 ceph-mon[49602]: pgmap v30: 0 pgs: ; 0 B data, 26 MiB used, 20 GiB / 20 GiB avail 2026-03-07T10:15:30.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:30 vm01 ceph-mon[49602]: from='client.24151 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm04:/dev/vde", "target": ["mon-mgr", 
""]}]: dispatch 2026-03-07T10:15:30.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:30 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-07T10:15:30.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:30 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-07T10:15:30.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:30 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:30.683 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:30 vm07 ceph-mon[68568]: from='osd.1 [v2:192.168.123.101:6810/2110476213,v1:192.168.123.101:6811/2110476213]' entity='osd.1' cmd='[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm01", "root=default"]}]': finished 2026-03-07T10:15:30.683 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:30 vm07 ceph-mon[68568]: osdmap e11: 2 total, 1 up, 2 in 2026-03-07T10:15:30.683 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:30 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-07T10:15:30.683 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:30 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-07T10:15:30.683 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:30 vm07 ceph-mon[68568]: pgmap v30: 0 pgs: ; 0 B data, 26 MiB used, 20 GiB / 20 GiB avail 2026-03-07T10:15:30.683 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:30 vm07 ceph-mon[68568]: from='client.24151 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm04:/dev/vde", "target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:15:30.683 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:30 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-07T10:15:30.683 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:30 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-07T10:15:30.683 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:30 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:31.230 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:31 vm04 ceph-mon[49935]: purged_snaps scrub starts 2026-03-07T10:15:31.231 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:31 vm04 ceph-mon[49935]: purged_snaps scrub ok 2026-03-07T10:15:31.231 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:31 vm04 ceph-mon[49935]: osd.1 [v2:192.168.123.101:6810/2110476213,v1:192.168.123.101:6811/2110476213] boot 2026-03-07T10:15:31.231 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:31 vm04 ceph-mon[49935]: osdmap e12: 2 total, 2 up, 2 in 2026-03-07T10:15:31.231 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:31 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-07T10:15:31.231 
INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:31 vm04 ceph-mon[49935]: from='client.? 192.168.123.104:0/1749529563' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "9403c18e-0c42-46a6-a584-58f450d0eb95"}]: dispatch 2026-03-07T10:15:31.231 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:31 vm04 ceph-mon[49935]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "9403c18e-0c42-46a6-a584-58f450d0eb95"}]: dispatch 2026-03-07T10:15:31.231 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:31 vm04 ceph-mon[49935]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "9403c18e-0c42-46a6-a584-58f450d0eb95"}]': finished 2026-03-07T10:15:31.231 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:31 vm04 ceph-mon[49935]: osdmap e13: 3 total, 2 up, 3 in 2026-03-07T10:15:31.231 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:31 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-07T10:15:31.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:31 vm01 ceph-mon[49602]: purged_snaps scrub starts 2026-03-07T10:15:31.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:31 vm01 ceph-mon[49602]: purged_snaps scrub ok 2026-03-07T10:15:31.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:31 vm01 ceph-mon[49602]: osd.1 [v2:192.168.123.101:6810/2110476213,v1:192.168.123.101:6811/2110476213] boot 2026-03-07T10:15:31.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:31 vm01 ceph-mon[49602]: osdmap e12: 2 total, 2 up, 2 in 2026-03-07T10:15:31.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:31 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-07T10:15:31.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:31 vm01 ceph-mon[49602]: from='client.? 192.168.123.104:0/1749529563' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "9403c18e-0c42-46a6-a584-58f450d0eb95"}]: dispatch 2026-03-07T10:15:31.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:31 vm01 ceph-mon[49602]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "9403c18e-0c42-46a6-a584-58f450d0eb95"}]: dispatch 2026-03-07T10:15:31.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:31 vm01 ceph-mon[49602]: from='client.? 
' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "9403c18e-0c42-46a6-a584-58f450d0eb95"}]': finished 2026-03-07T10:15:31.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:31 vm01 ceph-mon[49602]: osdmap e13: 3 total, 2 up, 3 in 2026-03-07T10:15:31.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:31 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-07T10:15:31.682 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:31 vm07 ceph-mon[68568]: purged_snaps scrub starts 2026-03-07T10:15:31.682 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:31 vm07 ceph-mon[68568]: purged_snaps scrub ok 2026-03-07T10:15:31.682 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:31 vm07 ceph-mon[68568]: osd.1 [v2:192.168.123.101:6810/2110476213,v1:192.168.123.101:6811/2110476213] boot 2026-03-07T10:15:31.682 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:31 vm07 ceph-mon[68568]: osdmap e12: 2 total, 2 up, 2 in 2026-03-07T10:15:31.682 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:31 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-07T10:15:31.682 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:31 vm07 ceph-mon[68568]: from='client.? 192.168.123.104:0/1749529563' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "9403c18e-0c42-46a6-a584-58f450d0eb95"}]: dispatch 2026-03-07T10:15:31.682 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:31 vm07 ceph-mon[68568]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "9403c18e-0c42-46a6-a584-58f450d0eb95"}]: dispatch 2026-03-07T10:15:31.682 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:31 vm07 ceph-mon[68568]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "9403c18e-0c42-46a6-a584-58f450d0eb95"}]': finished 2026-03-07T10:15:31.682 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:31 vm07 ceph-mon[68568]: osdmap e13: 3 total, 2 up, 3 in 2026-03-07T10:15:31.682 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:31 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-07T10:15:32.217 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:32 vm07 ceph-mon[68568]: from='client.? 192.168.123.104:0/4168066315' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-07T10:15:32.217 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:32 vm07 ceph-mon[68568]: pgmap v33: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:15:32.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:32 vm01 ceph-mon[49602]: from='client.? 192.168.123.104:0/4168066315' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-07T10:15:32.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:32 vm01 ceph-mon[49602]: pgmap v33: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:15:32.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:32 vm04 ceph-mon[49935]: from='client.? 
192.168.123.104:0/4168066315' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-07T10:15:32.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:32 vm04 ceph-mon[49935]: pgmap v33: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:15:35.000 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:34 vm07 ceph-mon[68568]: pgmap v34: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:15:35.000 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:34 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:35.000 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:34 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:35.000 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:34 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:35.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:34 vm04 ceph-mon[49935]: pgmap v34: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:15:35.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:34 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:35.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:34 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:35.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:34 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:35.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:34 vm01 ceph-mon[49602]: pgmap v34: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:15:35.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:34 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:35.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:34 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:35.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:34 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:35.851 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:35 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "osd.2"}]: dispatch 2026-03-07T10:15:35.851 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:35 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:36.182 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:35 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "osd.2"}]: dispatch 2026-03-07T10:15:36.182 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:35 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:36.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:35 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "osd.2"}]: dispatch 2026-03-07T10:15:36.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:35 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 
2026-03-07T10:15:36.851 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:36 vm04 ceph-mon[49935]: Deploying daemon osd.2 on vm04 2026-03-07T10:15:36.851 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:36 vm04 ceph-mon[49935]: pgmap v35: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:15:37.116 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:36 vm01 ceph-mon[49602]: Deploying daemon osd.2 on vm04 2026-03-07T10:15:37.116 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:36 vm01 ceph-mon[49602]: pgmap v35: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:15:37.183 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:36 vm07 ceph-mon[68568]: Deploying daemon osd.2 on vm04 2026-03-07T10:15:37.183 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:36 vm07 ceph-mon[68568]: pgmap v35: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:15:38.683 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:38 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:38.683 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:38 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:38.683 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:38 vm07 ceph-mon[68568]: Detected new or changed devices on vm01 2026-03-07T10:15:38.683 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:38 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:15:38.683 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:38 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:38.683 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:38 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:38.683 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:38 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:15:38.683 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:38 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:38.683 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:38 vm07 ceph-mon[68568]: pgmap v36: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:15:38.683 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:38 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:38.683 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:38 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:38.683 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:38 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:15:38.683 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:38 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:38.683 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:38 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:38.683 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:38 vm07 ceph-mon[68568]: 
from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:15:38.683 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:38 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:38.683 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:38 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:15:38.683 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:38 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:38.683 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:38 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:15:38.683 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:38 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:38.683 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:38 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:38.683 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:38 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:38.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:38 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:38.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:38 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:38.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:38 vm01 ceph-mon[49602]: Detected new or changed devices on vm01 2026-03-07T10:15:38.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:38 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:15:38.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:38 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:38.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:38 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:38.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:38 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:15:38.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:38 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:38.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:38 vm01 ceph-mon[49602]: pgmap v36: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:15:38.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:38 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:38.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:38 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:38.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:38 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 
cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:15:38.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:38 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:38.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:38 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:38.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:38 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:15:38.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:38 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:38.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:38 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:15:38.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:38 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:38.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:38 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:15:38.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:38 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:38.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:38 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:38.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:38 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:38.852 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:38 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:38.852 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:38 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:38.852 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:38 vm04 ceph-mon[49935]: Detected new or changed devices on vm01 2026-03-07T10:15:38.852 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:38 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:15:38.852 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:38 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:38.852 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:38 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:38.852 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:38 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:15:38.852 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:38 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:38.852 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:38 vm04 ceph-mon[49935]: pgmap 
v36: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:15:38.852 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:38 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:38.852 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:38 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:38.852 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:38 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:15:38.852 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:38 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:38.852 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:38 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:38.852 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:38 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:15:38.852 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:38 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:38.852 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:38 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:15:38.852 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:38 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:38.852 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:38 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:15:38.852 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:38 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:38.852 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:38 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:38.852 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:38 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:39.565 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:39 vm04 ceph-mon[49935]: from='osd.2 [v2:192.168.123.104:6800/3187441749,v1:192.168.123.104:6801/3187441749]' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch 2026-03-07T10:15:39.565 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:39 vm04 ceph-mon[49935]: from='osd.2 ' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch 2026-03-07T10:15:39.643 INFO:teuthology.orchestra.run.vm04.stdout:Created osd(s) 2 on host 'vm04' 2026-03-07T10:15:39.707 DEBUG:teuthology.orchestra.run.vm04:osd.2> sudo journalctl -f -n 0 -u ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328@osd.2.service 2026-03-07T10:15:39.709 INFO:tasks.cephadm:Deploying osd.3 on vm04 with /dev/vdd... 
2026-03-07T10:15:39.709 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3fd6e214-1a0e-11f1-b256-99cfc35f3328 -- lvm zap /dev/vdd 2026-03-07T10:15:39.724 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:39 vm01 ceph-mon[49602]: from='osd.2 [v2:192.168.123.104:6800/3187441749,v1:192.168.123.104:6801/3187441749]' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch 2026-03-07T10:15:39.724 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:39 vm01 ceph-mon[49602]: from='osd.2 ' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch 2026-03-07T10:15:39.923 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/mon.b/config 2026-03-07T10:15:39.932 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:39 vm07 ceph-mon[68568]: from='osd.2 [v2:192.168.123.104:6800/3187441749,v1:192.168.123.104:6801/3187441749]' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch 2026-03-07T10:15:39.933 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:39 vm07 ceph-mon[68568]: from='osd.2 ' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch 2026-03-07T10:15:40.724 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:40 vm01 ceph-mon[49602]: from='osd.2 ' entity='osd.2' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]': finished 2026-03-07T10:15:40.724 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:40 vm01 ceph-mon[49602]: osdmap e14: 3 total, 2 up, 3 in 2026-03-07T10:15:40.724 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:40 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-07T10:15:40.724 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:40 vm01 ceph-mon[49602]: from='osd.2 [v2:192.168.123.104:6800/3187441749,v1:192.168.123.104:6801/3187441749]' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm04", "root=default"]}]: dispatch 2026-03-07T10:15:40.724 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:40 vm01 ceph-mon[49602]: from='osd.2 ' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm04", "root=default"]}]: dispatch 2026-03-07T10:15:40.724 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:40 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:15:40.724 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:40 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config rm", "who": "osd/host:vm04", "name": "osd_memory_target"}]: dispatch 2026-03-07T10:15:40.724 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:40 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:40.724 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:40 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:40.724 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:40 
vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:15:40.724 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:40 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:40.724 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:40 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:40.724 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:40 vm01 ceph-mon[49602]: pgmap v38: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:15:40.850 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:40 vm04 ceph-mon[49935]: from='osd.2 ' entity='osd.2' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]': finished 2026-03-07T10:15:40.850 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:40 vm04 ceph-mon[49935]: osdmap e14: 3 total, 2 up, 3 in 2026-03-07T10:15:40.850 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:40 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-07T10:15:40.850 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:40 vm04 ceph-mon[49935]: from='osd.2 [v2:192.168.123.104:6800/3187441749,v1:192.168.123.104:6801/3187441749]' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm04", "root=default"]}]: dispatch 2026-03-07T10:15:40.850 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:40 vm04 ceph-mon[49935]: from='osd.2 ' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm04", "root=default"]}]: dispatch 2026-03-07T10:15:40.850 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:40 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:15:40.850 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:40 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config rm", "who": "osd/host:vm04", "name": "osd_memory_target"}]: dispatch 2026-03-07T10:15:40.850 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:40 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:40.850 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:40 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:40.850 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:40 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:15:40.850 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:40 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:40.850 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:40 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:40.850 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:40 vm04 ceph-mon[49935]: pgmap v38: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:15:40.850 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 07 10:15:40 vm04 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-osd-2[54651]: 2026-03-07T10:15:40.453+0000 7fc6c9491640 -1 osd.2 0 waiting 
for initial osdmap 2026-03-07T10:15:40.850 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 07 10:15:40 vm04 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-osd-2[54651]: 2026-03-07T10:15:40.457+0000 7fc6c4aa8640 -1 osd.2 15 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-07T10:15:40.932 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:40 vm07 ceph-mon[68568]: from='osd.2 ' entity='osd.2' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]': finished 2026-03-07T10:15:40.932 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:40 vm07 ceph-mon[68568]: osdmap e14: 3 total, 2 up, 3 in 2026-03-07T10:15:40.932 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:40 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-07T10:15:40.932 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:40 vm07 ceph-mon[68568]: from='osd.2 [v2:192.168.123.104:6800/3187441749,v1:192.168.123.104:6801/3187441749]' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm04", "root=default"]}]: dispatch 2026-03-07T10:15:40.932 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:40 vm07 ceph-mon[68568]: from='osd.2 ' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm04", "root=default"]}]: dispatch 2026-03-07T10:15:40.932 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:40 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:15:40.932 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:40 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config rm", "who": "osd/host:vm04", "name": "osd_memory_target"}]: dispatch 2026-03-07T10:15:40.932 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:40 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:40.932 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:40 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:40.932 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:40 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:15:40.932 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:40 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:40.932 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:40 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:40.932 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:40 vm07 ceph-mon[68568]: pgmap v38: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:15:41.034 INFO:teuthology.orchestra.run.vm04.stdout: 2026-03-07T10:15:41.051 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3fd6e214-1a0e-11f1-b256-99cfc35f3328 -- ceph orch daemon add osd vm04:/dev/vdd 2026-03-07T10:15:41.232 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config 
/var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/mon.b/config 2026-03-07T10:15:41.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:41 vm04 ceph-mon[49935]: from='osd.2 ' entity='osd.2' cmd='[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm04", "root=default"]}]': finished 2026-03-07T10:15:41.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:41 vm04 ceph-mon[49935]: osdmap e15: 3 total, 2 up, 3 in 2026-03-07T10:15:41.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:41 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-07T10:15:41.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:41 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-07T10:15:41.724 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:41 vm01 ceph-mon[49602]: from='osd.2 ' entity='osd.2' cmd='[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm04", "root=default"]}]': finished 2026-03-07T10:15:41.724 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:41 vm01 ceph-mon[49602]: osdmap e15: 3 total, 2 up, 3 in 2026-03-07T10:15:41.724 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:41 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-07T10:15:41.724 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:41 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-07T10:15:41.932 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:41 vm07 ceph-mon[68568]: from='osd.2 ' entity='osd.2' cmd='[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm04", "root=default"]}]': finished 2026-03-07T10:15:41.932 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:41 vm07 ceph-mon[68568]: osdmap e15: 3 total, 2 up, 3 in 2026-03-07T10:15:41.932 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:41 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-07T10:15:41.932 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:41 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-07T10:15:42.707 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:42 vm04 ceph-mon[49935]: purged_snaps scrub starts 2026-03-07T10:15:42.707 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:42 vm04 ceph-mon[49935]: purged_snaps scrub ok 2026-03-07T10:15:42.707 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:42 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-07T10:15:42.707 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:42 vm04 ceph-mon[49935]: osd.2 [v2:192.168.123.104:6800/3187441749,v1:192.168.123.104:6801/3187441749] boot 2026-03-07T10:15:42.707 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:42 vm04 ceph-mon[49935]: osdmap e16: 3 total, 3 up, 3 in 2026-03-07T10:15:42.707 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:42 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-07T10:15:42.707 
INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:42 vm04 ceph-mon[49935]: from='client.24175 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm04:/dev/vdd", "target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:15:42.707 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:42 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-07T10:15:42.707 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:42 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-07T10:15:42.708 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:42 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:42.708 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:42 vm04 ceph-mon[49935]: pgmap v41: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:15:42.708 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:42 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32, "yes_i_really_mean_it": true}]: dispatch 2026-03-07T10:15:42.724 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:42 vm01 ceph-mon[49602]: purged_snaps scrub starts 2026-03-07T10:15:42.724 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:42 vm01 ceph-mon[49602]: purged_snaps scrub ok 2026-03-07T10:15:42.724 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:42 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-07T10:15:42.724 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:42 vm01 ceph-mon[49602]: osd.2 [v2:192.168.123.104:6800/3187441749,v1:192.168.123.104:6801/3187441749] boot 2026-03-07T10:15:42.724 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:42 vm01 ceph-mon[49602]: osdmap e16: 3 total, 3 up, 3 in 2026-03-07T10:15:42.724 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:42 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-07T10:15:42.724 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:42 vm01 ceph-mon[49602]: from='client.24175 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm04:/dev/vdd", "target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:15:42.724 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:42 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-07T10:15:42.724 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:42 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-07T10:15:42.724 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:42 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:42.724 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:42 vm01 ceph-mon[49602]: pgmap v41: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:15:42.724 
INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:42 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32, "yes_i_really_mean_it": true}]: dispatch 2026-03-07T10:15:42.932 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:42 vm07 ceph-mon[68568]: purged_snaps scrub starts 2026-03-07T10:15:42.933 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:42 vm07 ceph-mon[68568]: purged_snaps scrub ok 2026-03-07T10:15:42.933 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:42 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-07T10:15:42.933 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:42 vm07 ceph-mon[68568]: osd.2 [v2:192.168.123.104:6800/3187441749,v1:192.168.123.104:6801/3187441749] boot 2026-03-07T10:15:42.933 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:42 vm07 ceph-mon[68568]: osdmap e16: 3 total, 3 up, 3 in 2026-03-07T10:15:42.933 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:42 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-07T10:15:42.933 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:42 vm07 ceph-mon[68568]: from='client.24175 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm04:/dev/vdd", "target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:15:42.933 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:42 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-07T10:15:42.933 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:42 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-07T10:15:42.933 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:42 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:42.933 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:42 vm07 ceph-mon[68568]: pgmap v41: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail 2026-03-07T10:15:42.933 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:42 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32, "yes_i_really_mean_it": true}]: dispatch 2026-03-07T10:15:43.461 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:43 vm04 sudo[56003]: ceph : PWD=/ ; USER=root ; COMMAND=/usr/sbin/smartctl -x --json=o /dev/vda 2026-03-07T10:15:43.461 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:43 vm04 sudo[56003]: pam_systemd(sudo:session): Failed to connect to system bus: No such file or directory 2026-03-07T10:15:43.461 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:43 vm04 sudo[56003]: pam_unix(sudo:session): session opened for user root(uid=0) by (uid=167) 2026-03-07T10:15:43.461 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:43 vm04 sudo[56003]: pam_unix(sudo:session): session closed for user root 2026-03-07T10:15:43.461 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 07 10:15:43 vm04 sudo[55962]: ceph : PWD=/ ; USER=root ; COMMAND=/usr/sbin/smartctl 
-x --json=o /dev/vde 2026-03-07T10:15:43.461 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 07 10:15:43 vm04 sudo[55962]: pam_systemd(sudo:session): Failed to connect to system bus: No such file or directory 2026-03-07T10:15:43.461 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 07 10:15:43 vm04 sudo[55962]: pam_unix(sudo:session): session opened for user root(uid=0) by (uid=167) 2026-03-07T10:15:43.461 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 07 10:15:43 vm04 sudo[55962]: pam_unix(sudo:session): session closed for user root 2026-03-07T10:15:43.466 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:43 vm01 sudo[62930]: ceph : PWD=/ ; USER=root ; COMMAND=/usr/sbin/smartctl -x --json=o /dev/vda 2026-03-07T10:15:43.466 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:43 vm01 sudo[62930]: pam_systemd(sudo:session): Failed to connect to system bus: No such file or directory 2026-03-07T10:15:43.466 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:43 vm01 sudo[62930]: pam_unix(sudo:session): session opened for user root(uid=0) by (uid=167) 2026-03-07T10:15:43.466 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:43 vm01 sudo[62930]: pam_unix(sudo:session): session closed for user root 2026-03-07T10:15:43.466 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 07 10:15:43 vm01 sudo[62924]: ceph : PWD=/ ; USER=root ; COMMAND=/usr/sbin/smartctl -x --json=o /dev/vde 2026-03-07T10:15:43.466 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 07 10:15:43 vm01 sudo[62924]: pam_systemd(sudo:session): Failed to connect to system bus: No such file or directory 2026-03-07T10:15:43.466 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 07 10:15:43 vm01 sudo[62924]: pam_unix(sudo:session): session opened for user root(uid=0) by (uid=167) 2026-03-07T10:15:43.466 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 07 10:15:43 vm01 sudo[62924]: pam_unix(sudo:session): session closed for user root 2026-03-07T10:15:43.467 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 07 10:15:43 vm01 sudo[62927]: ceph : PWD=/ ; USER=root ; COMMAND=/usr/sbin/smartctl -x --json=o /dev/vdd 2026-03-07T10:15:43.467 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 07 10:15:43 vm01 sudo[62927]: pam_systemd(sudo:session): Failed to connect to system bus: No such file or directory 2026-03-07T10:15:43.467 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 07 10:15:43 vm01 sudo[62927]: pam_unix(sudo:session): session opened for user root(uid=0) by (uid=167) 2026-03-07T10:15:43.467 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 07 10:15:43 vm01 sudo[62927]: pam_unix(sudo:session): session closed for user root 2026-03-07T10:15:43.683 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:43 vm07 sudo[70597]: ceph : PWD=/ ; USER=root ; COMMAND=/usr/sbin/smartctl -x --json=o /dev/vda 2026-03-07T10:15:43.683 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:43 vm07 sudo[70597]: pam_systemd(sudo:session): Failed to connect to system bus: No such file or directory 2026-03-07T10:15:43.683 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:43 vm07 sudo[70597]: pam_unix(sudo:session): session opened for user root(uid=0) by (uid=167) 2026-03-07T10:15:43.683 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:43 vm07 sudo[70597]: pam_unix(sudo:session): session closed for user root 2026-03-07T10:15:43.683 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:43 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd='[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32, "yes_i_really_mean_it": true}]': finished 
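The zap-and-re-add flow above (cephadm ceph-volume lvm zap on /dev/vdd, then "ceph orch daemon add osd vm04:/dev/vdd") only hands the device back to the orchestrator; the new daemon appears once the mons log its boot and the osdmap epoch bumps, as seen in the surrounding journal entries. One way to wait for that from an admin node might look like the sketch below (assumptions: the node has client.admin access, jq is installed, and EXPECTED_UP is a hypothetical target for the number of up OSDs, not a value from this run):

    # wait until the osdmap reports the expected number of up OSDs
    EXPECTED_UP=4
    until [ "$(ceph osd stat -f json | jq -r '.num_up_osds')" -ge "$EXPECTED_UP" ]; do
        sleep 10
    done
    # show where the re-added OSD landed in the CRUSH hierarchy
    ceph osd tree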
2026-03-07T10:15:43.683 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:43 vm07 ceph-mon[68568]: osdmap e17: 3 total, 3 up, 3 in 2026-03-07T10:15:43.683 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:43 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]: dispatch 2026-03-07T10:15:43.683 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:43 vm07 ceph-mon[68568]: from='client.? 192.168.123.104:0/2196728721' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "155a69d2-3a90-4797-870f-0d55995439d5"}]: dispatch 2026-03-07T10:15:43.683 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:43 vm07 ceph-mon[68568]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "155a69d2-3a90-4797-870f-0d55995439d5"}]: dispatch 2026-03-07T10:15:43.683 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:43 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd='[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]': finished 2026-03-07T10:15:43.683 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:43 vm07 ceph-mon[68568]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "155a69d2-3a90-4797-870f-0d55995439d5"}]': finished 2026-03-07T10:15:43.683 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:43 vm07 ceph-mon[68568]: osdmap e18: 4 total, 3 up, 4 in 2026-03-07T10:15:43.683 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:43 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-07T10:15:43.683 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:43 vm07 ceph-mon[68568]: from='client.? 
192.168.123.104:0/1053048786' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-07T10:15:43.683 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:43 vm07 ceph-mon[68568]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-07T10:15:43.683 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:43 vm07 ceph-mon[68568]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-07T10:15:43.683 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:43 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-07T10:15:43.683 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:43 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-07T10:15:43.683 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:43 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-07T10:15:43.683 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:43 vm07 ceph-mon[68568]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-07T10:15:43.683 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:43 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-07T10:15:43.683 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:43 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-07T10:15:43.683 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:43 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-07T10:15:43.683 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:43 vm07 ceph-mon[68568]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-07T10:15:43.683 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:43 vm07 ceph-mon[68568]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-07T10:15:43.683 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:43 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-07T10:15:43.683 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:43 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-07T10:15:43.683 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:43 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-07T10:15:43.683 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:43 vm07 ceph-mon[68568]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-07T10:15:43.724 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:43 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd='[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32, "yes_i_really_mean_it": true}]': finished 2026-03-07T10:15:43.724 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:43 vm01 ceph-mon[49602]: osdmap e17: 3 total, 3 up, 3 
in 2026-03-07T10:15:43.724 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:43 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]: dispatch 2026-03-07T10:15:43.724 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:43 vm01 ceph-mon[49602]: from='client.? 192.168.123.104:0/2196728721' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "155a69d2-3a90-4797-870f-0d55995439d5"}]: dispatch 2026-03-07T10:15:43.724 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:43 vm01 ceph-mon[49602]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "155a69d2-3a90-4797-870f-0d55995439d5"}]: dispatch 2026-03-07T10:15:43.724 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:43 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd='[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]': finished 2026-03-07T10:15:43.724 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:43 vm01 ceph-mon[49602]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "155a69d2-3a90-4797-870f-0d55995439d5"}]': finished 2026-03-07T10:15:43.724 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:43 vm01 ceph-mon[49602]: osdmap e18: 4 total, 3 up, 4 in 2026-03-07T10:15:43.724 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:43 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-07T10:15:43.724 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:43 vm01 ceph-mon[49602]: from='client.? 
192.168.123.104:0/1053048786' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-07T10:15:43.724 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:43 vm01 ceph-mon[49602]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-07T10:15:43.724 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:43 vm01 ceph-mon[49602]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-07T10:15:43.724 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:43 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-07T10:15:43.724 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:43 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-07T10:15:43.724 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:43 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-07T10:15:43.724 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:43 vm01 ceph-mon[49602]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-07T10:15:43.724 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:43 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-07T10:15:43.724 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:43 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-07T10:15:43.724 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:43 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-07T10:15:43.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:43 vm01 ceph-mon[49602]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-07T10:15:43.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:43 vm01 ceph-mon[49602]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-07T10:15:43.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:43 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-07T10:15:43.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:43 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-07T10:15:43.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:43 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-07T10:15:43.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:43 vm01 ceph-mon[49602]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-07T10:15:43.850 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:43 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd='[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32, "yes_i_really_mean_it": true}]': finished 2026-03-07T10:15:43.850 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:43 vm04 ceph-mon[49935]: osdmap e17: 3 total, 3 up, 3 
in 2026-03-07T10:15:43.850 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:43 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]: dispatch 2026-03-07T10:15:43.850 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:43 vm04 ceph-mon[49935]: from='client.? 192.168.123.104:0/2196728721' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "155a69d2-3a90-4797-870f-0d55995439d5"}]: dispatch 2026-03-07T10:15:43.850 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:43 vm04 ceph-mon[49935]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "155a69d2-3a90-4797-870f-0d55995439d5"}]: dispatch 2026-03-07T10:15:43.850 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:43 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd='[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]': finished 2026-03-07T10:15:43.850 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:43 vm04 ceph-mon[49935]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "155a69d2-3a90-4797-870f-0d55995439d5"}]': finished 2026-03-07T10:15:43.850 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:43 vm04 ceph-mon[49935]: osdmap e18: 4 total, 3 up, 4 in 2026-03-07T10:15:43.850 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:43 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-07T10:15:43.850 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:43 vm04 ceph-mon[49935]: from='client.? 
192.168.123.104:0/1053048786' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-07T10:15:43.850 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:43 vm04 ceph-mon[49935]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-07T10:15:43.850 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:43 vm04 ceph-mon[49935]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-07T10:15:43.850 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:43 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-07T10:15:43.851 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:43 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-07T10:15:43.851 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:43 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-07T10:15:43.851 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:43 vm04 ceph-mon[49935]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-07T10:15:43.851 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:43 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-07T10:15:43.851 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:43 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-07T10:15:43.851 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:43 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-07T10:15:43.851 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:43 vm04 ceph-mon[49935]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-07T10:15:43.851 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:43 vm04 ceph-mon[49935]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-07T10:15:43.851 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:43 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-07T10:15:43.851 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:43 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-07T10:15:43.851 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:43 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-07T10:15:43.851 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:43 vm04 ceph-mon[49935]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-07T10:15:44.932 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:44 vm07 ceph-mon[68568]: osdmap e19: 4 total, 3 up, 4 in 2026-03-07T10:15:44.932 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:44 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-07T10:15:44.932 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:44 vm07 ceph-mon[68568]: pgmap 
v45: 1 pgs: 1 unknown; 0 B data, 79 MiB used, 60 GiB / 60 GiB avail 2026-03-07T10:15:44.932 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:44 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:44.933 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:44 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:44.933 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:44 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:15:44.933 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:44 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:44.933 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:44 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:15:44.933 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:44 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:44.933 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:44 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:44.933 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:44 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:44.933 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:44 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:44.933 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:44 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:15:44.933 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:44 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:44.933 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:44 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:44.933 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:44 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:15:44.933 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:44 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:44.974 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:44 vm01 ceph-mon[49602]: osdmap e19: 4 total, 3 up, 4 in 2026-03-07T10:15:44.974 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:44 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-07T10:15:44.974 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:44 vm01 ceph-mon[49602]: pgmap v45: 1 pgs: 1 unknown; 0 B data, 79 MiB used, 60 GiB / 60 GiB avail 2026-03-07T10:15:44.974 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:44 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:44.974 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:44 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 
2026-03-07T10:15:44.974 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:44 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:15:44.974 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:44 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:44.974 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:44 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:15:44.974 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:44 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:44.974 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:44 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:44.974 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:44 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:44.974 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:44 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:44.974 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:44 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:15:44.974 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:44 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:44.974 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:44 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:44.974 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:44 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:15:44.974 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:44 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:45.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:44 vm04 ceph-mon[49935]: osdmap e19: 4 total, 3 up, 4 in 2026-03-07T10:15:45.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:44 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-07T10:15:45.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:44 vm04 ceph-mon[49935]: pgmap v45: 1 pgs: 1 unknown; 0 B data, 79 MiB used, 60 GiB / 60 GiB avail 2026-03-07T10:15:45.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:44 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:45.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:44 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:45.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:44 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:15:45.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:44 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 
cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:45.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:44 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:15:45.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:44 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:45.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:44 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:45.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:44 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:45.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:44 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:45.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:44 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:15:45.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:44 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:45.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:44 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:45.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:44 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:15:45.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:44 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:45.850 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:45 vm04 ceph-mon[49935]: mgrmap e15: a(active, since 73s), standbys: b 2026-03-07T10:15:45.932 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:45 vm07 ceph-mon[68568]: mgrmap e15: a(active, since 73s), standbys: b 2026-03-07T10:15:45.974 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:45 vm01 ceph-mon[49602]: mgrmap e15: a(active, since 73s), standbys: b 2026-03-07T10:15:47.018 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:46 vm04 ceph-mon[49935]: pgmap v46: 1 pgs: 1 unknown; 0 B data, 80 MiB used, 60 GiB / 60 GiB avail 2026-03-07T10:15:47.182 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:46 vm07 ceph-mon[68568]: pgmap v46: 1 pgs: 1 unknown; 0 B data, 80 MiB used, 60 GiB / 60 GiB avail 2026-03-07T10:15:47.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:46 vm01 ceph-mon[49602]: pgmap v46: 1 pgs: 1 unknown; 0 B data, 80 MiB used, 60 GiB / 60 GiB avail 2026-03-07T10:15:47.994 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:47 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "osd.3"}]: dispatch 2026-03-07T10:15:47.994 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:47 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:48.182 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:47 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "osd.3"}]: dispatch 
2026-03-07T10:15:48.182 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:47 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:48.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:47 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "osd.3"}]: dispatch 2026-03-07T10:15:48.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:47 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:48.886 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:48 vm04 ceph-mon[49935]: Deploying daemon osd.3 on vm04 2026-03-07T10:15:48.886 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:48 vm04 ceph-mon[49935]: pgmap v47: 1 pgs: 1 active+clean; 449 KiB data, 81 MiB used, 60 GiB / 60 GiB avail 2026-03-07T10:15:49.182 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:48 vm07 ceph-mon[68568]: Deploying daemon osd.3 on vm04 2026-03-07T10:15:49.182 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:48 vm07 ceph-mon[68568]: pgmap v47: 1 pgs: 1 active+clean; 449 KiB data, 81 MiB used, 60 GiB / 60 GiB avail 2026-03-07T10:15:49.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:48 vm01 ceph-mon[49602]: Deploying daemon osd.3 on vm04 2026-03-07T10:15:49.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:48 vm01 ceph-mon[49602]: pgmap v47: 1 pgs: 1 active+clean; 449 KiB data, 81 MiB used, 60 GiB / 60 GiB avail 2026-03-07T10:15:51.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:50 vm04 ceph-mon[49935]: pgmap v48: 1 pgs: 1 active+clean; 449 KiB data, 81 MiB used, 60 GiB / 60 GiB avail 2026-03-07T10:15:51.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:50 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:15:51.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:50 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:51.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:50 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:51.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:50 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:15:51.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:50 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:51.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:50 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:51.182 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:50 vm07 ceph-mon[68568]: pgmap v48: 1 pgs: 1 active+clean; 449 KiB data, 81 MiB used, 60 GiB / 60 GiB avail 2026-03-07T10:15:51.182 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:50 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:15:51.182 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:50 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config 
generate-minimal-conf"}]: dispatch 2026-03-07T10:15:51.182 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:50 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:51.182 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:50 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:15:51.182 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:50 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:51.182 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:50 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:51.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:50 vm01 ceph-mon[49602]: pgmap v48: 1 pgs: 1 active+clean; 449 KiB data, 81 MiB used, 60 GiB / 60 GiB avail 2026-03-07T10:15:51.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:50 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:15:51.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:50 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:51.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:50 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:51.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:50 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:15:51.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:50 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:51.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:50 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:52.001 INFO:teuthology.orchestra.run.vm04.stdout:Created osd(s) 3 on host 'vm04' 2026-03-07T10:15:52.079 DEBUG:teuthology.orchestra.run.vm04:osd.3> sudo journalctl -f -n 0 -u ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328@osd.3.service 2026-03-07T10:15:52.080 INFO:tasks.cephadm:Deploying osd.4 on vm07 with /dev/vde... 
2026-03-07T10:15:52.081 DEBUG:teuthology.orchestra.run.vm07:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3fd6e214-1a0e-11f1-b256-99cfc35f3328 -- lvm zap /dev/vde 2026-03-07T10:15:52.252 INFO:teuthology.orchestra.run.vm07.stderr:Inferring config /var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/mon.c/config 2026-03-07T10:15:52.752 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 07 10:15:52 vm04 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-osd-3[57953]: 2026-03-07T10:15:52.415+0000 7fb1c12f6740 -1 osd.3 0 log_to_monitors true 2026-03-07T10:15:53.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:52 vm04 ceph-mon[49935]: pgmap v49: 1 pgs: 1 active+clean; 449 KiB data, 81 MiB used, 60 GiB / 60 GiB avail 2026-03-07T10:15:53.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:52 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:15:53.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:52 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config rm", "who": "osd/host:vm04", "name": "osd_memory_target"}]: dispatch 2026-03-07T10:15:53.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:52 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:53.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:52 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:53.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:52 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:15:53.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:52 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:53.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:52 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:53.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:52 vm04 ceph-mon[49935]: from='osd.3 [v2:192.168.123.104:6808/1533780217,v1:192.168.123.104:6809/1533780217]' entity='osd.3' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]: dispatch 2026-03-07T10:15:53.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:52 vm04 ceph-mon[49935]: from='osd.3 ' entity='osd.3' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]: dispatch 2026-03-07T10:15:53.182 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:52 vm07 ceph-mon[68568]: pgmap v49: 1 pgs: 1 active+clean; 449 KiB data, 81 MiB used, 60 GiB / 60 GiB avail 2026-03-07T10:15:53.182 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:52 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:15:53.183 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:52 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config rm", "who": "osd/host:vm04", "name": "osd_memory_target"}]: dispatch 2026-03-07T10:15:53.183 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:52 vm07 ceph-mon[68568]: 
from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:53.183 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:52 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:53.183 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:52 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:15:53.183 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:52 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:53.183 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:52 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:53.183 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:52 vm07 ceph-mon[68568]: from='osd.3 [v2:192.168.123.104:6808/1533780217,v1:192.168.123.104:6809/1533780217]' entity='osd.3' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]: dispatch 2026-03-07T10:15:53.183 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:52 vm07 ceph-mon[68568]: from='osd.3 ' entity='osd.3' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]: dispatch 2026-03-07T10:15:53.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:52 vm01 ceph-mon[49602]: pgmap v49: 1 pgs: 1 active+clean; 449 KiB data, 81 MiB used, 60 GiB / 60 GiB avail 2026-03-07T10:15:53.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:52 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:15:53.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:52 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config rm", "who": "osd/host:vm04", "name": "osd_memory_target"}]: dispatch 2026-03-07T10:15:53.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:52 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:53.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:52 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:53.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:52 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:15:53.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:52 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:53.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:52 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:53.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:52 vm01 ceph-mon[49602]: from='osd.3 [v2:192.168.123.104:6808/1533780217,v1:192.168.123.104:6809/1533780217]' entity='osd.3' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]: dispatch 2026-03-07T10:15:53.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:52 vm01 ceph-mon[49602]: from='osd.3 ' entity='osd.3' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]: dispatch 2026-03-07T10:15:53.389 INFO:teuthology.orchestra.run.vm07.stdout: 2026-03-07T10:15:53.413 
DEBUG:teuthology.orchestra.run.vm07:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3fd6e214-1a0e-11f1-b256-99cfc35f3328 -- ceph orch daemon add osd vm07:/dev/vde 2026-03-07T10:15:53.593 INFO:teuthology.orchestra.run.vm07.stderr:Inferring config /var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/mon.c/config 2026-03-07T10:15:54.287 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:54 vm07 ceph-mon[68568]: from='osd.3 ' entity='osd.3' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]': finished 2026-03-07T10:15:54.287 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:54 vm07 ceph-mon[68568]: osdmap e20: 4 total, 3 up, 4 in 2026-03-07T10:15:54.287 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:54 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-07T10:15:54.287 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:54 vm07 ceph-mon[68568]: from='osd.3 [v2:192.168.123.104:6808/1533780217,v1:192.168.123.104:6809/1533780217]' entity='osd.3' cmd=[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm04", "root=default"]}]: dispatch 2026-03-07T10:15:54.287 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:54 vm07 ceph-mon[68568]: from='osd.3 ' entity='osd.3' cmd=[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm04", "root=default"]}]: dispatch 2026-03-07T10:15:54.287 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:54 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-07T10:15:54.287 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:54 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-07T10:15:54.287 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:54 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:54.350 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 07 10:15:54 vm04 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-osd-3[57953]: 2026-03-07T10:15:54.013+0000 7fb1bd277640 -1 osd.3 0 waiting for initial osdmap 2026-03-07T10:15:54.350 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 07 10:15:54 vm04 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-osd-3[57953]: 2026-03-07T10:15:54.019+0000 7fb1b90a1640 -1 osd.3 21 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-07T10:15:54.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:54 vm04 ceph-mon[49935]: from='osd.3 ' entity='osd.3' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]': finished 2026-03-07T10:15:54.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:54 vm04 ceph-mon[49935]: osdmap e20: 4 total, 3 up, 4 in 2026-03-07T10:15:54.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:54 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-07T10:15:54.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:54 vm04 ceph-mon[49935]: from='osd.3 
[v2:192.168.123.104:6808/1533780217,v1:192.168.123.104:6809/1533780217]' entity='osd.3' cmd=[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm04", "root=default"]}]: dispatch 2026-03-07T10:15:54.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:54 vm04 ceph-mon[49935]: from='osd.3 ' entity='osd.3' cmd=[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm04", "root=default"]}]: dispatch 2026-03-07T10:15:54.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:54 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-07T10:15:54.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:54 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-07T10:15:54.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:54 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:54.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:54 vm01 ceph-mon[49602]: from='osd.3 ' entity='osd.3' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]': finished 2026-03-07T10:15:54.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:54 vm01 ceph-mon[49602]: osdmap e20: 4 total, 3 up, 4 in 2026-03-07T10:15:54.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:54 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-07T10:15:54.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:54 vm01 ceph-mon[49602]: from='osd.3 [v2:192.168.123.104:6808/1533780217,v1:192.168.123.104:6809/1533780217]' entity='osd.3' cmd=[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm04", "root=default"]}]: dispatch 2026-03-07T10:15:54.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:54 vm01 ceph-mon[49602]: from='osd.3 ' entity='osd.3' cmd=[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm04", "root=default"]}]: dispatch 2026-03-07T10:15:54.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:54 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-07T10:15:54.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:54 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-07T10:15:54.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:54 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:15:55.156 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:55 vm07 ceph-mon[68568]: pgmap v51: 1 pgs: 1 active+clean; 449 KiB data, 81 MiB used, 60 GiB / 60 GiB avail 2026-03-07T10:15:55.156 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:55 vm07 ceph-mon[68568]: from='client.24194 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm07:/dev/vde", "target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:15:55.156 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:55 vm07 ceph-mon[68568]: from='osd.3 ' 
entity='osd.3' cmd='[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm04", "root=default"]}]': finished 2026-03-07T10:15:55.156 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:55 vm07 ceph-mon[68568]: osdmap e21: 4 total, 3 up, 4 in 2026-03-07T10:15:55.156 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:55 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-07T10:15:55.156 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:55 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-07T10:15:55.156 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:55 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:55.156 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:55 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:55.156 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:55 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:55.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:55 vm04 ceph-mon[49935]: pgmap v51: 1 pgs: 1 active+clean; 449 KiB data, 81 MiB used, 60 GiB / 60 GiB avail 2026-03-07T10:15:55.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:55 vm04 ceph-mon[49935]: from='client.24194 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm07:/dev/vde", "target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:15:55.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:55 vm04 ceph-mon[49935]: from='osd.3 ' entity='osd.3' cmd='[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm04", "root=default"]}]': finished 2026-03-07T10:15:55.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:55 vm04 ceph-mon[49935]: osdmap e21: 4 total, 3 up, 4 in 2026-03-07T10:15:55.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:55 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-07T10:15:55.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:55 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-07T10:15:55.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:55 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:55.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:55 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:55.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:55 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:55.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:55 vm01 ceph-mon[49602]: pgmap v51: 1 pgs: 1 active+clean; 449 KiB data, 81 MiB used, 60 GiB / 60 GiB avail 2026-03-07T10:15:55.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:55 vm01 ceph-mon[49602]: from='client.24194 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm07:/dev/vde", "target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:15:55.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:55 vm01 ceph-mon[49602]: from='osd.3 ' entity='osd.3' cmd='[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, 
"args": ["host=vm04", "root=default"]}]': finished 2026-03-07T10:15:55.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:55 vm01 ceph-mon[49602]: osdmap e21: 4 total, 3 up, 4 in 2026-03-07T10:15:55.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:55 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-07T10:15:55.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:55 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-07T10:15:55.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:55 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:55.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:55 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:55.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:55 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:56.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:56 vm04 ceph-mon[49935]: purged_snaps scrub starts 2026-03-07T10:15:56.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:56 vm04 ceph-mon[49935]: purged_snaps scrub ok 2026-03-07T10:15:56.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:56 vm04 ceph-mon[49935]: osd.3 [v2:192.168.123.104:6808/1533780217,v1:192.168.123.104:6809/1533780217] boot 2026-03-07T10:15:56.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:56 vm04 ceph-mon[49935]: osdmap e22: 4 total, 4 up, 4 in 2026-03-07T10:15:56.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:56 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-07T10:15:56.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:56 vm04 ceph-mon[49935]: from='client.? 192.168.123.107:0/122974417' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "654075eb-fb5d-47cc-abb4-791f434384d1"}]: dispatch 2026-03-07T10:15:56.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:56 vm04 ceph-mon[49935]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "654075eb-fb5d-47cc-abb4-791f434384d1"}]: dispatch 2026-03-07T10:15:56.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:56 vm04 ceph-mon[49935]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "654075eb-fb5d-47cc-abb4-791f434384d1"}]': finished 2026-03-07T10:15:56.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:56 vm04 ceph-mon[49935]: osdmap e23: 5 total, 4 up, 5 in 2026-03-07T10:15:56.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:56 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-07T10:15:56.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:56 vm04 ceph-mon[49935]: from='client.? 
192.168.123.107:0/2956323175' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-07T10:15:56.434 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:56 vm07 ceph-mon[68568]: purged_snaps scrub starts 2026-03-07T10:15:56.434 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:56 vm07 ceph-mon[68568]: purged_snaps scrub ok 2026-03-07T10:15:56.434 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:56 vm07 ceph-mon[68568]: osd.3 [v2:192.168.123.104:6808/1533780217,v1:192.168.123.104:6809/1533780217] boot 2026-03-07T10:15:56.434 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:56 vm07 ceph-mon[68568]: osdmap e22: 4 total, 4 up, 4 in 2026-03-07T10:15:56.434 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:56 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-07T10:15:56.434 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:56 vm07 ceph-mon[68568]: from='client.? 192.168.123.107:0/122974417' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "654075eb-fb5d-47cc-abb4-791f434384d1"}]: dispatch 2026-03-07T10:15:56.434 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:56 vm07 ceph-mon[68568]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "654075eb-fb5d-47cc-abb4-791f434384d1"}]: dispatch 2026-03-07T10:15:56.434 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:56 vm07 ceph-mon[68568]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "654075eb-fb5d-47cc-abb4-791f434384d1"}]': finished 2026-03-07T10:15:56.434 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:56 vm07 ceph-mon[68568]: osdmap e23: 5 total, 4 up, 5 in 2026-03-07T10:15:56.434 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:56 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-07T10:15:56.434 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:56 vm07 ceph-mon[68568]: from='client.? 192.168.123.107:0/2956323175' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-07T10:15:56.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:56 vm01 ceph-mon[49602]: purged_snaps scrub starts 2026-03-07T10:15:56.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:56 vm01 ceph-mon[49602]: purged_snaps scrub ok 2026-03-07T10:15:56.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:56 vm01 ceph-mon[49602]: osd.3 [v2:192.168.123.104:6808/1533780217,v1:192.168.123.104:6809/1533780217] boot 2026-03-07T10:15:56.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:56 vm01 ceph-mon[49602]: osdmap e22: 4 total, 4 up, 4 in 2026-03-07T10:15:56.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:56 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-07T10:15:56.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:56 vm01 ceph-mon[49602]: from='client.? 192.168.123.107:0/122974417' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "654075eb-fb5d-47cc-abb4-791f434384d1"}]: dispatch 2026-03-07T10:15:56.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:56 vm01 ceph-mon[49602]: from='client.? 
' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "654075eb-fb5d-47cc-abb4-791f434384d1"}]: dispatch 2026-03-07T10:15:56.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:56 vm01 ceph-mon[49602]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "654075eb-fb5d-47cc-abb4-791f434384d1"}]': finished 2026-03-07T10:15:56.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:56 vm01 ceph-mon[49602]: osdmap e23: 5 total, 4 up, 5 in 2026-03-07T10:15:56.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:56 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-07T10:15:56.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:56 vm01 ceph-mon[49602]: from='client.? 192.168.123.107:0/2956323175' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-07T10:15:57.384 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:57 vm01 ceph-mon[49602]: pgmap v55: 1 pgs: 1 remapped; 449 KiB data, 107 MiB used, 80 GiB / 80 GiB avail 2026-03-07T10:15:57.384 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:57 vm01 ceph-mon[49602]: osdmap e24: 5 total, 4 up, 5 in 2026-03-07T10:15:57.384 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:57 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-07T10:15:57.432 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:57 vm07 ceph-mon[68568]: pgmap v55: 1 pgs: 1 remapped; 449 KiB data, 107 MiB used, 80 GiB / 80 GiB avail 2026-03-07T10:15:57.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:57 vm07 ceph-mon[68568]: osdmap e24: 5 total, 4 up, 5 in 2026-03-07T10:15:57.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:57 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-07T10:15:57.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:57 vm04 ceph-mon[49935]: pgmap v55: 1 pgs: 1 remapped; 449 KiB data, 107 MiB used, 80 GiB / 80 GiB avail 2026-03-07T10:15:57.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:57 vm04 ceph-mon[49935]: osdmap e24: 5 total, 4 up, 5 in 2026-03-07T10:15:57.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:57 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-07T10:15:59.105 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:58 vm07 ceph-mon[68568]: pgmap v57: 1 pgs: 1 remapped; 449 KiB data, 107 MiB used, 80 GiB / 80 GiB avail 2026-03-07T10:15:59.105 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:58 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:59.105 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:58 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:59.105 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:58 vm07 ceph-mon[68568]: Detected new or changed devices on vm01 2026-03-07T10:15:59.105 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:58 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:59.105 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:58 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:59.105 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:58 
vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:59.105 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:15:58 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:59.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:58 vm01 ceph-mon[49602]: pgmap v57: 1 pgs: 1 remapped; 449 KiB data, 107 MiB used, 80 GiB / 80 GiB avail 2026-03-07T10:15:59.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:58 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:59.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:58 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:59.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:58 vm01 ceph-mon[49602]: Detected new or changed devices on vm01 2026-03-07T10:15:59.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:58 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:59.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:58 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:59.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:58 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:59.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:15:58 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:59.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:58 vm04 ceph-mon[49935]: pgmap v57: 1 pgs: 1 remapped; 449 KiB data, 107 MiB used, 80 GiB / 80 GiB avail 2026-03-07T10:15:59.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:58 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:59.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:58 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:59.351 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:58 vm04 ceph-mon[49935]: Detected new or changed devices on vm01 2026-03-07T10:15:59.351 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:58 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:59.351 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:58 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:59.351 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:58 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:15:59.351 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:15:58 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:00.989 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:00 vm07 ceph-mon[68568]: pgmap v58: 1 pgs: 1 remapped; 449 KiB data, 107 MiB used, 80 GiB / 80 GiB avail 2026-03-07T10:16:00.989 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:00 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "osd.4"}]: dispatch 2026-03-07T10:16:00.989 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:00 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:16:01.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:00 vm01 
ceph-mon[49602]: pgmap v58: 1 pgs: 1 remapped; 449 KiB data, 107 MiB used, 80 GiB / 80 GiB avail 2026-03-07T10:16:01.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:00 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "osd.4"}]: dispatch 2026-03-07T10:16:01.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:00 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:16:01.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:00 vm04 ceph-mon[49935]: pgmap v58: 1 pgs: 1 remapped; 449 KiB data, 107 MiB used, 80 GiB / 80 GiB avail 2026-03-07T10:16:01.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:00 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "osd.4"}]: dispatch 2026-03-07T10:16:01.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:00 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:16:02.183 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:01 vm07 ceph-mon[68568]: Deploying daemon osd.4 on vm07 2026-03-07T10:16:02.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:01 vm04 ceph-mon[49935]: Deploying daemon osd.4 on vm07 2026-03-07T10:16:02.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:01 vm01 ceph-mon[49602]: Deploying daemon osd.4 on vm07 2026-03-07T10:16:03.166 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:02 vm07 ceph-mon[68568]: pgmap v59: 1 pgs: 1 active+clean; 449 KiB data, 107 MiB used, 80 GiB / 80 GiB avail; 67 KiB/s, 0 objects/s recovering 2026-03-07T10:16:03.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:02 vm04 ceph-mon[49935]: pgmap v59: 1 pgs: 1 active+clean; 449 KiB data, 107 MiB used, 80 GiB / 80 GiB avail; 67 KiB/s, 0 objects/s recovering 2026-03-07T10:16:03.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:02 vm01 ceph-mon[49602]: pgmap v59: 1 pgs: 1 active+clean; 449 KiB data, 107 MiB used, 80 GiB / 80 GiB avail; 67 KiB/s, 0 objects/s recovering 2026-03-07T10:16:04.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:03 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:16:04.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:03 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:04.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:03 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:16:04.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:03 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:16:04.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:03 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:04.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:03 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:04.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:03 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config 
dump", "format": "json"}]: dispatch 2026-03-07T10:16:04.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:03 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:04.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:03 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:16:04.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:03 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:16:04.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:03 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:04.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:03 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:04.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:03 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:16:04.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:03 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:04.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:03 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:16:04.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:03 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:16:04.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:03 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:04.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:03 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:04.796 INFO:teuthology.orchestra.run.vm07.stdout:Created osd(s) 4 on host 'vm07' 2026-03-07T10:16:04.851 DEBUG:teuthology.orchestra.run.vm07:osd.4> sudo journalctl -f -n 0 -u ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328@osd.4.service 2026-03-07T10:16:04.853 INFO:tasks.cephadm:Deploying osd.5 on vm07 with /dev/vdd... 
2026-03-07T10:16:04.853 DEBUG:teuthology.orchestra.run.vm07:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3fd6e214-1a0e-11f1-b256-99cfc35f3328 -- lvm zap /dev/vdd 2026-03-07T10:16:04.990 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:04 vm07 ceph-mon[68568]: pgmap v60: 1 pgs: 1 active+clean; 449 KiB data, 107 MiB used, 80 GiB / 80 GiB avail; 52 KiB/s, 0 objects/s recovering 2026-03-07T10:16:04.990 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:04 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:04.990 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:04 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:04.990 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:04 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:16:04.990 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:04 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:04.990 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:04 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:16:04.990 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:04 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:16:04.990 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:04 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:04.990 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:04 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:16:04.990 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:04 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-07T10:16:04.990 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:04 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:04.990 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:04 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:04.990 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:04 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:04.990 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:04 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:16:04.990 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:04 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:16:04.990 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:04 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:05.070 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:04 vm04 ceph-mon[49935]: pgmap v60: 1 pgs: 1 
active+clean; 449 KiB data, 107 MiB used, 80 GiB / 80 GiB avail; 52 KiB/s, 0 objects/s recovering 2026-03-07T10:16:05.070 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:04 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:05.070 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:04 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:05.070 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:04 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:16:05.070 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:04 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:05.070 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:04 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:16:05.070 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:04 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:16:05.070 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:04 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:05.070 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:04 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:16:05.070 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:04 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-07T10:16:05.070 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:04 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:05.070 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:04 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:05.070 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:04 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:05.070 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:04 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:16:05.070 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:04 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:16:05.070 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:04 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:05.087 INFO:teuthology.orchestra.run.vm07.stderr:Inferring config /var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/mon.c/config 2026-03-07T10:16:05.258 INFO:journalctl@ceph.osd.4.vm07.stdout:Mar 07 10:16:05 vm07 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-osd-4[72943]: 2026-03-07T10:16:05.019+0000 7fefdc1e2740 -1 osd.4 0 log_to_monitors true 2026-03-07T10:16:05.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:04 vm01 ceph-mon[49602]: pgmap v60: 1 pgs: 1 active+clean; 449 KiB data, 107 MiB used, 80 GiB / 80 GiB avail; 52 KiB/s, 0 objects/s 
recovering 2026-03-07T10:16:05.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:04 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:05.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:04 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:05.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:04 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:16:05.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:04 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:05.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:04 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:16:05.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:04 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:16:05.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:04 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:05.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:04 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:16:05.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:04 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-07T10:16:05.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:04 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:05.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:04 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:05.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:04 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:05.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:04 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:16:05.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:04 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:16:05.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:04 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:06.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:05 vm04 ceph-mon[49935]: Detected new or changed devices on vm04 2026-03-07T10:16:06.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:05 vm04 ceph-mon[49935]: Adjusting osd_memory_target on vm07 to 3329M 2026-03-07T10:16:06.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:05 vm04 ceph-mon[49935]: from='osd.4 [v2:192.168.123.107:6800/3003059232,v1:192.168.123.107:6801/3003059232]' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-07T10:16:06.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:05 vm04 
ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:06.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:05 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:06.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:05 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:16:06.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:05 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:16:06.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:05 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:16:06.101 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:05 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:06.101 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:05 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:06.104 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:05 vm07 ceph-mon[68568]: Detected new or changed devices on vm04 2026-03-07T10:16:06.104 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:05 vm07 ceph-mon[68568]: Adjusting osd_memory_target on vm07 to 3329M 2026-03-07T10:16:06.104 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:05 vm07 ceph-mon[68568]: from='osd.4 [v2:192.168.123.107:6800/3003059232,v1:192.168.123.107:6801/3003059232]' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-07T10:16:06.104 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:05 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:06.104 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:05 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:06.104 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:05 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:16:06.104 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:05 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:16:06.104 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:05 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:16:06.104 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:05 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:06.104 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:05 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:06.196 INFO:teuthology.orchestra.run.vm07.stdout: 2026-03-07T10:16:06.213 DEBUG:teuthology.orchestra.run.vm07:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3fd6e214-1a0e-11f1-b256-99cfc35f3328 -- ceph orch daemon add osd vm07:/dev/vdd 2026-03-07T10:16:06.387 
INFO:teuthology.orchestra.run.vm07.stderr:Inferring config /var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/mon.c/config 2026-03-07T10:16:06.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:05 vm01 ceph-mon[49602]: Detected new or changed devices on vm04 2026-03-07T10:16:06.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:05 vm01 ceph-mon[49602]: Adjusting osd_memory_target on vm07 to 3329M 2026-03-07T10:16:06.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:05 vm01 ceph-mon[49602]: from='osd.4 [v2:192.168.123.107:6800/3003059232,v1:192.168.123.107:6801/3003059232]' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-07T10:16:06.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:05 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:06.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:05 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:06.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:05 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:16:06.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:05 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:16:06.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:05 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:16:06.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:05 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:06.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:05 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:07.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:07 vm04 ceph-mon[49935]: pgmap v61: 1 pgs: 1 active+clean; 449 KiB data, 107 MiB used, 80 GiB / 80 GiB avail; 45 KiB/s, 0 objects/s recovering 2026-03-07T10:16:07.351 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:07 vm04 ceph-mon[49935]: from='osd.4 [v2:192.168.123.107:6800/3003059232,v1:192.168.123.107:6801/3003059232]' entity='osd.4' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]': finished 2026-03-07T10:16:07.351 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:07 vm04 ceph-mon[49935]: osdmap e25: 5 total, 4 up, 5 in 2026-03-07T10:16:07.351 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:07 vm04 ceph-mon[49935]: from='osd.4 [v2:192.168.123.107:6800/3003059232,v1:192.168.123.107:6801/3003059232]' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm07", "root=default"]}]: dispatch 2026-03-07T10:16:07.351 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:07 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-07T10:16:07.351 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:07 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-07T10:16:07.351 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:07 vm04 ceph-mon[49935]: from='mgr.14156 
192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-07T10:16:07.351 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:07 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:16:07.432 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:07 vm07 ceph-mon[68568]: pgmap v61: 1 pgs: 1 active+clean; 449 KiB data, 107 MiB used, 80 GiB / 80 GiB avail; 45 KiB/s, 0 objects/s recovering 2026-03-07T10:16:07.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:07 vm07 ceph-mon[68568]: from='osd.4 [v2:192.168.123.107:6800/3003059232,v1:192.168.123.107:6801/3003059232]' entity='osd.4' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]': finished 2026-03-07T10:16:07.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:07 vm07 ceph-mon[68568]: osdmap e25: 5 total, 4 up, 5 in 2026-03-07T10:16:07.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:07 vm07 ceph-mon[68568]: from='osd.4 [v2:192.168.123.107:6800/3003059232,v1:192.168.123.107:6801/3003059232]' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm07", "root=default"]}]: dispatch 2026-03-07T10:16:07.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:07 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-07T10:16:07.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:07 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-07T10:16:07.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:07 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-07T10:16:07.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:07 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:16:07.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:07 vm01 ceph-mon[49602]: pgmap v61: 1 pgs: 1 active+clean; 449 KiB data, 107 MiB used, 80 GiB / 80 GiB avail; 45 KiB/s, 0 objects/s recovering 2026-03-07T10:16:07.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:07 vm01 ceph-mon[49602]: from='osd.4 [v2:192.168.123.107:6800/3003059232,v1:192.168.123.107:6801/3003059232]' entity='osd.4' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]': finished 2026-03-07T10:16:07.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:07 vm01 ceph-mon[49602]: osdmap e25: 5 total, 4 up, 5 in 2026-03-07T10:16:07.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:07 vm01 ceph-mon[49602]: from='osd.4 [v2:192.168.123.107:6800/3003059232,v1:192.168.123.107:6801/3003059232]' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm07", "root=default"]}]: dispatch 2026-03-07T10:16:07.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:07 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-07T10:16:07.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:07 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 
cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-07T10:16:07.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:07 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-07T10:16:07.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:07 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:16:08.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:08 vm04 ceph-mon[49935]: from='client.24218 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm07:/dev/vdd", "target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:16:08.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:08 vm04 ceph-mon[49935]: from='osd.4 [v2:192.168.123.107:6800/3003059232,v1:192.168.123.107:6801/3003059232]' entity='osd.4' cmd='[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm07", "root=default"]}]': finished 2026-03-07T10:16:08.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:08 vm04 ceph-mon[49935]: osdmap e26: 5 total, 4 up, 5 in 2026-03-07T10:16:08.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:08 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-07T10:16:08.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:08 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-07T10:16:08.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:08 vm04 ceph-mon[49935]: from='client.? 192.168.123.107:0/1287837008' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "9dec1d82-be8f-40f2-9e9a-cbb3b644be63"}]: dispatch 2026-03-07T10:16:08.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:08 vm04 ceph-mon[49935]: from='client.? 
192.168.123.107:0/1287837008' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "9dec1d82-be8f-40f2-9e9a-cbb3b644be63"}]': finished 2026-03-07T10:16:08.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:08 vm04 ceph-mon[49935]: osdmap e27: 6 total, 4 up, 6 in 2026-03-07T10:16:08.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:08 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-07T10:16:08.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:08 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-07T10:16:08.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:08 vm04 ceph-mon[49935]: from='osd.4 [v2:192.168.123.107:6800/3003059232,v1:192.168.123.107:6801/3003059232]' entity='osd.4' 2026-03-07T10:16:08.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:08 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-07T10:16:08.381 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:08 vm07 ceph-mon[68568]: from='client.24218 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm07:/dev/vdd", "target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:16:08.381 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:08 vm07 ceph-mon[68568]: from='osd.4 [v2:192.168.123.107:6800/3003059232,v1:192.168.123.107:6801/3003059232]' entity='osd.4' cmd='[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm07", "root=default"]}]': finished 2026-03-07T10:16:08.381 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:08 vm07 ceph-mon[68568]: osdmap e26: 5 total, 4 up, 5 in 2026-03-07T10:16:08.381 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:08 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-07T10:16:08.381 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:08 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-07T10:16:08.381 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:08 vm07 ceph-mon[68568]: from='client.? 192.168.123.107:0/1287837008' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "9dec1d82-be8f-40f2-9e9a-cbb3b644be63"}]: dispatch 2026-03-07T10:16:08.381 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:08 vm07 ceph-mon[68568]: from='client.? 
192.168.123.107:0/1287837008' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "9dec1d82-be8f-40f2-9e9a-cbb3b644be63"}]': finished 2026-03-07T10:16:08.381 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:08 vm07 ceph-mon[68568]: osdmap e27: 6 total, 4 up, 6 in 2026-03-07T10:16:08.381 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:08 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-07T10:16:08.381 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:08 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-07T10:16:08.381 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:08 vm07 ceph-mon[68568]: from='osd.4 [v2:192.168.123.107:6800/3003059232,v1:192.168.123.107:6801/3003059232]' entity='osd.4' 2026-03-07T10:16:08.381 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:08 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-07T10:16:08.381 INFO:journalctl@ceph.osd.4.vm07.stdout:Mar 07 10:16:07 vm07 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-osd-4[72943]: 2026-03-07T10:16:07.951+0000 7fefd8976640 -1 osd.4 0 waiting for initial osdmap 2026-03-07T10:16:08.381 INFO:journalctl@ceph.osd.4.vm07.stdout:Mar 07 10:16:07 vm07 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-osd-4[72943]: 2026-03-07T10:16:07.967+0000 7fefd378c640 -1 osd.4 27 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-07T10:16:08.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:08 vm01 ceph-mon[49602]: from='client.24218 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm07:/dev/vdd", "target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:16:08.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:08 vm01 ceph-mon[49602]: from='osd.4 [v2:192.168.123.107:6800/3003059232,v1:192.168.123.107:6801/3003059232]' entity='osd.4' cmd='[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm07", "root=default"]}]': finished 2026-03-07T10:16:08.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:08 vm01 ceph-mon[49602]: osdmap e26: 5 total, 4 up, 5 in 2026-03-07T10:16:08.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:08 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-07T10:16:08.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:08 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-07T10:16:08.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:08 vm01 ceph-mon[49602]: from='client.? 192.168.123.107:0/1287837008' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "9dec1d82-be8f-40f2-9e9a-cbb3b644be63"}]: dispatch 2026-03-07T10:16:08.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:08 vm01 ceph-mon[49602]: from='client.? 
192.168.123.107:0/1287837008' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "9dec1d82-be8f-40f2-9e9a-cbb3b644be63"}]': finished 2026-03-07T10:16:08.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:08 vm01 ceph-mon[49602]: osdmap e27: 6 total, 4 up, 6 in 2026-03-07T10:16:08.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:08 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-07T10:16:08.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:08 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-07T10:16:08.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:08 vm01 ceph-mon[49602]: from='osd.4 [v2:192.168.123.107:6800/3003059232,v1:192.168.123.107:6801/3003059232]' entity='osd.4' 2026-03-07T10:16:08.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:08 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-07T10:16:09.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:09 vm04 ceph-mon[49935]: purged_snaps scrub starts 2026-03-07T10:16:09.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:09 vm04 ceph-mon[49935]: purged_snaps scrub ok 2026-03-07T10:16:09.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:09 vm04 ceph-mon[49935]: pgmap v64: 1 pgs: 1 active+clean; 449 KiB data, 107 MiB used, 80 GiB / 80 GiB avail; 56 KiB/s, 0 objects/s recovering 2026-03-07T10:16:09.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:09 vm04 ceph-mon[49935]: from='client.? 192.168.123.107:0/3289240981' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-07T10:16:09.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:09 vm04 ceph-mon[49935]: osd.4 [v2:192.168.123.107:6800/3003059232,v1:192.168.123.107:6801/3003059232] boot 2026-03-07T10:16:09.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:09 vm04 ceph-mon[49935]: osdmap e28: 6 total, 5 up, 6 in 2026-03-07T10:16:09.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:09 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-07T10:16:09.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:09 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-07T10:16:09.432 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:09 vm07 ceph-mon[68568]: purged_snaps scrub starts 2026-03-07T10:16:09.432 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:09 vm07 ceph-mon[68568]: purged_snaps scrub ok 2026-03-07T10:16:09.432 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:09 vm07 ceph-mon[68568]: pgmap v64: 1 pgs: 1 active+clean; 449 KiB data, 107 MiB used, 80 GiB / 80 GiB avail; 56 KiB/s, 0 objects/s recovering 2026-03-07T10:16:09.432 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:09 vm07 ceph-mon[68568]: from='client.? 
192.168.123.107:0/3289240981' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-07T10:16:09.432 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:09 vm07 ceph-mon[68568]: osd.4 [v2:192.168.123.107:6800/3003059232,v1:192.168.123.107:6801/3003059232] boot 2026-03-07T10:16:09.432 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:09 vm07 ceph-mon[68568]: osdmap e28: 6 total, 5 up, 6 in 2026-03-07T10:16:09.432 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:09 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-07T10:16:09.432 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:09 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-07T10:16:09.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:09 vm01 ceph-mon[49602]: purged_snaps scrub starts 2026-03-07T10:16:09.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:09 vm01 ceph-mon[49602]: purged_snaps scrub ok 2026-03-07T10:16:09.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:09 vm01 ceph-mon[49602]: pgmap v64: 1 pgs: 1 active+clean; 449 KiB data, 107 MiB used, 80 GiB / 80 GiB avail; 56 KiB/s, 0 objects/s recovering 2026-03-07T10:16:09.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:09 vm01 ceph-mon[49602]: from='client.? 192.168.123.107:0/3289240981' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-07T10:16:09.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:09 vm01 ceph-mon[49602]: osd.4 [v2:192.168.123.107:6800/3003059232,v1:192.168.123.107:6801/3003059232] boot 2026-03-07T10:16:09.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:09 vm01 ceph-mon[49602]: osdmap e28: 6 total, 5 up, 6 in 2026-03-07T10:16:09.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:09 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-07T10:16:09.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:09 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-07T10:16:11.136 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:10 vm07 ceph-mon[68568]: pgmap v67: 1 pgs: 1 active+clean; 449 KiB data, 133 MiB used, 100 GiB / 100 GiB avail 2026-03-07T10:16:11.136 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:10 vm07 ceph-mon[68568]: osdmap e29: 6 total, 5 up, 6 in 2026-03-07T10:16:11.136 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:10 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-07T10:16:11.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:10 vm01 ceph-mon[49602]: pgmap v67: 1 pgs: 1 active+clean; 449 KiB data, 133 MiB used, 100 GiB / 100 GiB avail 2026-03-07T10:16:11.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:10 vm01 ceph-mon[49602]: osdmap e29: 6 total, 5 up, 6 in 2026-03-07T10:16:11.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:10 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-07T10:16:11.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:10 vm04 ceph-mon[49935]: pgmap v67: 1 pgs: 1 active+clean; 449 KiB data, 133 MiB used, 100 GiB / 100 GiB avail 
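The osdmap lines above ("6 total, 5 up, 6 in") track osd.4 coming up while the harness waits for the last OSD; further down the log the same information is polled as JSON via "cephadm ... shell ... ceph osd stat -f json". As a minimal illustrative sketch (not the teuthology implementation), the same wait could be done by hand from the admin host; the expected count of 6 and the use of plain "cephadm shell" with the cluster's inferred config are assumptions taken from this log:

    # Sketch: poll the osdmap until the expected number of OSDs report up.
    # Assumes it runs on a host with the admin keyring and that jq is installed.
    EXPECTED=6
    while true; do
        UP=$(sudo cephadm shell -- ceph osd stat -f json | jq -r '.num_up_osds')
        if [ "$UP" -ge "$EXPECTED" ]; then
            break
        fi
        sleep 5
    done
    echo "all $EXPECTED OSDs are up"

The field name num_up_osds matches the "ceph osd stat -f json" output captured later in this log (e.g. {"epoch":31,"num_osds":6,"num_up_osds":5,...}).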
2026-03-07T10:16:11.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:10 vm04 ceph-mon[49935]: osdmap e29: 6 total, 5 up, 6 in 2026-03-07T10:16:11.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:10 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-07T10:16:11.928 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:11 vm07 ceph-mon[68568]: osdmap e30: 6 total, 5 up, 6 in 2026-03-07T10:16:11.928 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:11 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-07T10:16:12.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:11 vm01 ceph-mon[49602]: osdmap e30: 6 total, 5 up, 6 in 2026-03-07T10:16:12.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:11 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-07T10:16:12.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:11 vm04 ceph-mon[49935]: osdmap e30: 6 total, 5 up, 6 in 2026-03-07T10:16:12.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:11 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-07T10:16:13.182 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:12 vm07 ceph-mon[68568]: pgmap v70: 1 pgs: 1 peering; 449 KiB data, 133 MiB used, 100 GiB / 100 GiB avail 2026-03-07T10:16:13.182 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:12 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "osd.5"}]: dispatch 2026-03-07T10:16:13.182 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:12 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:16:13.182 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:12 vm07 ceph-mon[68568]: Deploying daemon osd.5 on vm07 2026-03-07T10:16:13.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:12 vm01 ceph-mon[49602]: pgmap v70: 1 pgs: 1 peering; 449 KiB data, 133 MiB used, 100 GiB / 100 GiB avail 2026-03-07T10:16:13.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:12 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "osd.5"}]: dispatch 2026-03-07T10:16:13.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:12 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:16:13.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:12 vm01 ceph-mon[49602]: Deploying daemon osd.5 on vm07 2026-03-07T10:16:13.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:12 vm04 ceph-mon[49935]: pgmap v70: 1 pgs: 1 peering; 449 KiB data, 133 MiB used, 100 GiB / 100 GiB avail 2026-03-07T10:16:13.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:12 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "osd.5"}]: dispatch 2026-03-07T10:16:13.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:12 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:16:13.350 
INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:12 vm04 ceph-mon[49935]: Deploying daemon osd.5 on vm07 2026-03-07T10:16:15.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:14 vm01 ceph-mon[49602]: pgmap v71: 1 pgs: 1 peering; 449 KiB data, 134 MiB used, 100 GiB / 100 GiB avail 2026-03-07T10:16:15.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:14 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:15.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:14 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:15.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:14 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:16:15.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:14 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:16:15.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:14 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:15.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:14 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:16:15.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:14 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:15.315 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:14 vm07 ceph-mon[68568]: pgmap v71: 1 pgs: 1 peering; 449 KiB data, 134 MiB used, 100 GiB / 100 GiB avail 2026-03-07T10:16:15.315 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:14 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:15.315 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:14 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:15.315 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:14 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:16:15.315 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:14 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:16:15.315 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:14 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:15.315 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:14 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:16:15.315 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:14 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:15.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:14 vm04 ceph-mon[49935]: pgmap v71: 1 pgs: 1 peering; 449 KiB data, 134 MiB used, 100 GiB / 100 GiB avail 2026-03-07T10:16:15.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:14 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:15.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:14 vm04 ceph-mon[49935]: 
from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:15.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:14 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:16:15.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:14 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:16:15.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:14 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:15.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:14 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:16:15.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:14 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:16.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:15 vm01 ceph-mon[49602]: Detected new or changed devices on vm07 2026-03-07T10:16:16.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:15 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:16:16.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:15 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:16:16.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:15 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:16:16.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:15 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:16.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:15 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:16.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:15 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:16.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:15 vm04 ceph-mon[49935]: Detected new or changed devices on vm07 2026-03-07T10:16:16.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:15 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:16:16.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:15 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:16:16.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:15 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:16:16.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:15 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:16.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:15 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:16.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 
07 10:16:15 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:16.356 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:15 vm07 ceph-mon[68568]: Detected new or changed devices on vm07 2026-03-07T10:16:16.356 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:15 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:16:16.356 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:15 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:16:16.356 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:15 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:16:16.357 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:15 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:16.357 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:15 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:16.357 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:15 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:16.819 INFO:teuthology.orchestra.run.vm07.stdout:Created osd(s) 5 on host 'vm07' 2026-03-07T10:16:16.881 DEBUG:teuthology.orchestra.run.vm07:osd.5> sudo journalctl -f -n 0 -u ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328@osd.5.service 2026-03-07T10:16:16.882 INFO:tasks.cephadm:Waiting for 6 OSDs to come up... 2026-03-07T10:16:16.882 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3fd6e214-1a0e-11f1-b256-99cfc35f3328 -- ceph osd stat -f json 2026-03-07T10:16:17.071 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/mon.a/config 2026-03-07T10:16:17.158 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:16 vm01 ceph-mon[49602]: pgmap v72: 1 pgs: 1 active+clean; 449 KiB data, 134 MiB used, 100 GiB / 100 GiB avail; 66 KiB/s, 0 objects/s recovering 2026-03-07T10:16:17.158 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:16 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:17.158 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:16 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:17.158 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:16 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:16:17.158 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:16 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:16:17.158 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:16 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:16:17.158 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:16 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' 
entity='mgr.a' 2026-03-07T10:16:17.158 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:16 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:17.158 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:16 vm01 ceph-mon[49602]: from='osd.5 [v2:192.168.123.107:6808/1009376405,v1:192.168.123.107:6809/1009376405]' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch 2026-03-07T10:16:17.158 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:16 vm01 ceph-mon[49602]: from='osd.5 ' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch 2026-03-07T10:16:17.158 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:16 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:16:17.158 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:16 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-07T10:16:17.158 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:16 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:17.158 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:16 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config rm", "who": "osd.5", "name": "osd_memory_target"}]: dispatch 2026-03-07T10:16:17.158 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:16 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:17.158 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:16 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:17.158 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:16 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:16:17.158 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:16 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:16:17.158 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:16 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:17.182 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:16 vm07 ceph-mon[68568]: pgmap v72: 1 pgs: 1 active+clean; 449 KiB data, 134 MiB used, 100 GiB / 100 GiB avail; 66 KiB/s, 0 objects/s recovering 2026-03-07T10:16:17.183 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:16 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:17.183 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:16 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:17.183 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:16 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:16:17.183 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:16 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:16:17.183 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 
10:16:16 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:16:17.183 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:16 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:17.183 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:16 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:17.183 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:16 vm07 ceph-mon[68568]: from='osd.5 [v2:192.168.123.107:6808/1009376405,v1:192.168.123.107:6809/1009376405]' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch 2026-03-07T10:16:17.183 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:16 vm07 ceph-mon[68568]: from='osd.5 ' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch 2026-03-07T10:16:17.183 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:16 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:16:17.183 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:16 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-07T10:16:17.183 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:16 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:17.183 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:16 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config rm", "who": "osd.5", "name": "osd_memory_target"}]: dispatch 2026-03-07T10:16:17.183 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:16 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:17.183 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:16 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:17.183 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:16 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:16:17.183 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:16 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:16:17.183 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:16 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:17.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:16 vm04 ceph-mon[49935]: pgmap v72: 1 pgs: 1 active+clean; 449 KiB data, 134 MiB used, 100 GiB / 100 GiB avail; 66 KiB/s, 0 objects/s recovering 2026-03-07T10:16:17.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:16 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:17.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:16 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:17.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:16 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": 
"json"}]: dispatch 2026-03-07T10:16:17.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:16 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:16:17.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:16 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:16:17.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:16 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:17.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:16 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:17.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:16 vm04 ceph-mon[49935]: from='osd.5 [v2:192.168.123.107:6808/1009376405,v1:192.168.123.107:6809/1009376405]' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch 2026-03-07T10:16:17.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:16 vm04 ceph-mon[49935]: from='osd.5 ' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch 2026-03-07T10:16:17.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:16 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:16:17.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:16 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-07T10:16:17.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:16 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:17.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:16 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config rm", "who": "osd.5", "name": "osd_memory_target"}]: dispatch 2026-03-07T10:16:17.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:16 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:17.351 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:16 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:17.351 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:16 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:16:17.351 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:16 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:16:17.351 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:16 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:17.511 INFO:teuthology.orchestra.run.vm01.stdout: 2026-03-07T10:16:17.570 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":31,"num_osds":6,"num_up_osds":5,"osd_up_since":1772878568,"num_in_osds":6,"osd_in_since":1772878567,"num_remapped_pgs":0} 2026-03-07T10:16:18.182 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:17 vm07 ceph-mon[68568]: Adjusting osd_memory_target on vm07 to 1664M 2026-03-07T10:16:18.182 
INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:17 vm07 ceph-mon[68568]: from='osd.5 ' entity='osd.5' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]': finished 2026-03-07T10:16:18.182 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:17 vm07 ceph-mon[68568]: osdmap e31: 6 total, 5 up, 6 in 2026-03-07T10:16:18.183 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:17 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-07T10:16:18.183 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:17 vm07 ceph-mon[68568]: from='osd.5 [v2:192.168.123.107:6808/1009376405,v1:192.168.123.107:6809/1009376405]' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm07", "root=default"]}]: dispatch 2026-03-07T10:16:18.183 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:17 vm07 ceph-mon[68568]: from='osd.5 ' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm07", "root=default"]}]: dispatch 2026-03-07T10:16:18.183 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:17 vm07 ceph-mon[68568]: from='client.? 192.168.123.101:0/1469147614' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-07T10:16:18.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:17 vm01 ceph-mon[49602]: Adjusting osd_memory_target on vm07 to 1664M 2026-03-07T10:16:18.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:17 vm01 ceph-mon[49602]: from='osd.5 ' entity='osd.5' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]': finished 2026-03-07T10:16:18.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:17 vm01 ceph-mon[49602]: osdmap e31: 6 total, 5 up, 6 in 2026-03-07T10:16:18.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:17 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-07T10:16:18.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:17 vm01 ceph-mon[49602]: from='osd.5 [v2:192.168.123.107:6808/1009376405,v1:192.168.123.107:6809/1009376405]' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm07", "root=default"]}]: dispatch 2026-03-07T10:16:18.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:17 vm01 ceph-mon[49602]: from='osd.5 ' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm07", "root=default"]}]: dispatch 2026-03-07T10:16:18.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:17 vm01 ceph-mon[49602]: from='client.? 
192.168.123.101:0/1469147614' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-07T10:16:18.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:17 vm04 ceph-mon[49935]: Adjusting osd_memory_target on vm07 to 1664M 2026-03-07T10:16:18.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:17 vm04 ceph-mon[49935]: from='osd.5 ' entity='osd.5' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]': finished 2026-03-07T10:16:18.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:17 vm04 ceph-mon[49935]: osdmap e31: 6 total, 5 up, 6 in 2026-03-07T10:16:18.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:17 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-07T10:16:18.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:17 vm04 ceph-mon[49935]: from='osd.5 [v2:192.168.123.107:6808/1009376405,v1:192.168.123.107:6809/1009376405]' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm07", "root=default"]}]: dispatch 2026-03-07T10:16:18.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:17 vm04 ceph-mon[49935]: from='osd.5 ' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm07", "root=default"]}]: dispatch 2026-03-07T10:16:18.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:17 vm04 ceph-mon[49935]: from='client.? 192.168.123.101:0/1469147614' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-07T10:16:18.571 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3fd6e214-1a0e-11f1-b256-99cfc35f3328 -- ceph osd stat -f json 2026-03-07T10:16:18.683 INFO:journalctl@ceph.osd.5.vm07.stdout:Mar 07 10:16:18 vm07 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-osd-5[75975]: 2026-03-07T10:16:18.234+0000 7f9914cb7640 -1 osd.5 0 waiting for initial osdmap 2026-03-07T10:16:18.683 INFO:journalctl@ceph.osd.5.vm07.stdout:Mar 07 10:16:18 vm07 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-osd-5[75975]: 2026-03-07T10:16:18.240+0000 7f99102ce640 -1 osd.5 32 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-07T10:16:18.759 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/mon.a/config 2026-03-07T10:16:19.111 INFO:teuthology.orchestra.run.vm01.stdout: 2026-03-07T10:16:19.111 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:18 vm01 ceph-mon[49602]: pgmap v74: 1 pgs: 1 active+clean; 449 KiB data, 134 MiB used, 100 GiB / 100 GiB avail; 58 KiB/s, 0 objects/s recovering 2026-03-07T10:16:19.111 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:18 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:19.111 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:18 vm01 ceph-mon[49602]: from='osd.5 ' entity='osd.5' cmd='[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm07", "root=default"]}]': finished 2026-03-07T10:16:19.111 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:18 vm01 ceph-mon[49602]: osdmap e32: 6 total, 5 up, 6 in 2026-03-07T10:16:19.111 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:18 vm01 ceph-mon[49602]: from='mgr.14156 
192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-07T10:16:19.111 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:18 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:19.111 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:18 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-07T10:16:19.111 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:18 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:19.180 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":32,"num_osds":6,"num_up_osds":5,"osd_up_since":1772878568,"num_in_osds":6,"osd_in_since":1772878567,"num_remapped_pgs":0} 2026-03-07T10:16:19.183 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:18 vm07 ceph-mon[68568]: pgmap v74: 1 pgs: 1 active+clean; 449 KiB data, 134 MiB used, 100 GiB / 100 GiB avail; 58 KiB/s, 0 objects/s recovering 2026-03-07T10:16:19.183 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:18 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:19.183 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:18 vm07 ceph-mon[68568]: from='osd.5 ' entity='osd.5' cmd='[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm07", "root=default"]}]': finished 2026-03-07T10:16:19.183 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:18 vm07 ceph-mon[68568]: osdmap e32: 6 total, 5 up, 6 in 2026-03-07T10:16:19.183 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:18 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-07T10:16:19.183 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:18 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:19.183 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:18 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-07T10:16:19.183 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:18 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:19.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:18 vm04 ceph-mon[49935]: pgmap v74: 1 pgs: 1 active+clean; 449 KiB data, 134 MiB used, 100 GiB / 100 GiB avail; 58 KiB/s, 0 objects/s recovering 2026-03-07T10:16:19.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:18 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:19.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:18 vm04 ceph-mon[49935]: from='osd.5 ' entity='osd.5' cmd='[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm07", "root=default"]}]': finished 2026-03-07T10:16:19.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:18 vm04 ceph-mon[49935]: osdmap e32: 6 total, 5 up, 6 in 2026-03-07T10:16:19.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:18 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-07T10:16:19.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:18 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:19.350 
INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:18 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-07T10:16:19.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:18 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:20.181 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3fd6e214-1a0e-11f1-b256-99cfc35f3328 -- ceph osd stat -f json 2026-03-07T10:16:20.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:19 vm01 ceph-mon[49602]: purged_snaps scrub starts 2026-03-07T10:16:20.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:19 vm01 ceph-mon[49602]: purged_snaps scrub ok 2026-03-07T10:16:20.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:19 vm01 ceph-mon[49602]: from='client.? 192.168.123.101:0/2410776040' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-07T10:16:20.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:19 vm01 ceph-mon[49602]: osd.5 [v2:192.168.123.107:6808/1009376405,v1:192.168.123.107:6809/1009376405] boot 2026-03-07T10:16:20.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:19 vm01 ceph-mon[49602]: osdmap e33: 6 total, 6 up, 6 in 2026-03-07T10:16:20.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:19 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-07T10:16:20.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:19 vm04 ceph-mon[49935]: purged_snaps scrub starts 2026-03-07T10:16:20.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:19 vm04 ceph-mon[49935]: purged_snaps scrub ok 2026-03-07T10:16:20.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:19 vm04 ceph-mon[49935]: from='client.? 192.168.123.101:0/2410776040' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-07T10:16:20.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:19 vm04 ceph-mon[49935]: osd.5 [v2:192.168.123.107:6808/1009376405,v1:192.168.123.107:6809/1009376405] boot 2026-03-07T10:16:20.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:19 vm04 ceph-mon[49935]: osdmap e33: 6 total, 6 up, 6 in 2026-03-07T10:16:20.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:19 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-07T10:16:20.370 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/mon.a/config 2026-03-07T10:16:20.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:19 vm07 ceph-mon[68568]: purged_snaps scrub starts 2026-03-07T10:16:20.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:19 vm07 ceph-mon[68568]: purged_snaps scrub ok 2026-03-07T10:16:20.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:19 vm07 ceph-mon[68568]: from='client.? 
192.168.123.101:0/2410776040' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-07T10:16:20.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:19 vm07 ceph-mon[68568]: osd.5 [v2:192.168.123.107:6808/1009376405,v1:192.168.123.107:6809/1009376405] boot 2026-03-07T10:16:20.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:19 vm07 ceph-mon[68568]: osdmap e33: 6 total, 6 up, 6 in 2026-03-07T10:16:20.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:19 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-07T10:16:20.702 INFO:teuthology.orchestra.run.vm01.stdout: 2026-03-07T10:16:20.774 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":34,"num_osds":6,"num_up_osds":6,"osd_up_since":1772878579,"num_in_osds":6,"osd_in_since":1772878567,"num_remapped_pgs":0} 2026-03-07T10:16:20.775 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid 3fd6e214-1a0e-11f1-b256-99cfc35f3328 -- ceph osd dump --format=json 2026-03-07T10:16:20.957 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/mon.a/config 2026-03-07T10:16:21.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:20 vm01 ceph-mon[49602]: pgmap v77: 1 pgs: 1 active+clean; 449 KiB data, 160 MiB used, 120 GiB / 120 GiB avail; 75 KiB/s, 0 objects/s recovering 2026-03-07T10:16:21.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:20 vm01 ceph-mon[49602]: osdmap e34: 6 total, 6 up, 6 in 2026-03-07T10:16:21.226 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:20 vm01 ceph-mon[49602]: from='client.? 
192.168.123.101:0/2112459464' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-07T10:16:21.284 INFO:teuthology.orchestra.run.vm01.stdout: 2026-03-07T10:16:21.284 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":35,"fsid":"3fd6e214-1a0e-11f1-b256-99cfc35f3328","created":"2026-03-07T10:13:49.200752+0000","modified":"2026-03-07T10:16:21.236435+0000","last_up_change":"2026-03-07T10:16:19.224891+0000","last_in_change":"2026-03-07T10:16:07.902065+0000","flags":"sortbitwise,recovery_deletes,purged_snapdirs,pglog_hardlimit","flags_num":5799936,"flags_set":["pglog_hardlimit","purged_snapdirs","recovery_deletes","sortbitwise"],"crush_version":14,"full_ratio":0.94999998807907104,"backfillfull_ratio":0.89999997615814209,"nearfull_ratio":0.85000002384185791,"cluster_snapshot":"","pool_max":1,"max_osd":6,"require_min_compat_client":"luminous","min_compat_client":"jewel","require_osd_release":"squid","allow_crimson":false,"pools":[{"pool":1,"pool_name":".mgr","create_time":"2026-03-07T10:15:41.708136+0000","flags":1,"flags_names":"hashpspool","type":1,"size":3,"min_size":2,"crush_rule":0,"peering_crush_bucket_count":0,"peering_crush_bucket_target":0,"peering_crush_bucket_barrier":0,"peering_crush_bucket_mandatory_member":2147483647,"is_stretch_pool":false,"object_hash":2,"pg_autoscale_mode":"off","pg_num":1,"pg_placement_num":1,"pg_placement_num_target":1,"pg_num_target":1,"pg_num_pending":1,"last_pg_merge_meta":{"source_pgid":"0.0","ready_epoch":0,"last_epoch_started":0,"last_epoch_clean":0,"source_version":"0'0","target_version":"0'0"},"last_change":"19","last_force_op_resend":"0","last_force_op_resend_prenautilus":"0","last_force_op_resend_preluminous":"0","auid":0,"snap_mode":"selfmanaged","snap_seq":0,"snap_epoch":0,"pool_snaps":[],"removed_snaps":"[]","quota_max_bytes":0,"quota_max_objects":0,"tiers":[],"tier_of":-1,"read_tier":-1,"write_tier":-1,"cache_mode":"none","target_max_bytes":0,"target_max_objects":0,"cache_target_dirty_ratio_micro":400000,"cache_target_dirty_high_ratio_micro":600000,"cache_target_full_ratio_micro":800000,"cache_min_flush_age":0,"cache_min_evict_age":0,"erasure_code_profile":"","hit_set_params":{"type":"none"},"hit_set_period":0,"hit_set_count":0,"use_gmt_hitset":true,"min_read_recency_for_promote":0,"min_write_recency_for_promote":0,"hit_set_grade_decay_rate":0,"hit_set_search_last_n":0,"grade_table":[],"stripe_width":0,"expected_num_objects":0,"fast_read":false,"options":{"pg_num_max":32,"pg_num_min":1},"application_metadata":{"mgr":{}},"read_balance":{"score_type":"Fair 
distribution","score_acting":6,"score_stable":6,"optimal_score":0.5,"raw_score_acting":3,"raw_score_stable":3,"primary_affinity_weighted":1,"average_primary_affinity":1,"average_primary_affinity_weighted":1}}],"osds":[{"osd":0,"uuid":"2672a018-9301-4f8e-b634-bac5c79f0203","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":8,"up_thru":22,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6802","nonce":4213759478},{"type":"v1","addr":"192.168.123.101:6803","nonce":4213759478}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6804","nonce":4213759478},{"type":"v1","addr":"192.168.123.101:6805","nonce":4213759478}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6808","nonce":4213759478},{"type":"v1","addr":"192.168.123.101:6809","nonce":4213759478}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6806","nonce":4213759478},{"type":"v1","addr":"192.168.123.101:6807","nonce":4213759478}]},"public_addr":"192.168.123.101:6803/4213759478","cluster_addr":"192.168.123.101:6805/4213759478","heartbeat_back_addr":"192.168.123.101:6809/4213759478","heartbeat_front_addr":"192.168.123.101:6807/4213759478","state":["exists","up"]},{"osd":1,"uuid":"930d5f0c-1a5a-4d6c-b82c-8410fdb0227e","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":12,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6810","nonce":2110476213},{"type":"v1","addr":"192.168.123.101:6811","nonce":2110476213}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6812","nonce":2110476213},{"type":"v1","addr":"192.168.123.101:6813","nonce":2110476213}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6816","nonce":2110476213},{"type":"v1","addr":"192.168.123.101:6817","nonce":2110476213}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6814","nonce":2110476213},{"type":"v1","addr":"192.168.123.101:6815","nonce":2110476213}]},"public_addr":"192.168.123.101:6811/2110476213","cluster_addr":"192.168.123.101:6813/2110476213","heartbeat_back_addr":"192.168.123.101:6817/2110476213","heartbeat_front_addr":"192.168.123.101:6815/2110476213","state":["exists","up"]},{"osd":2,"uuid":"9403c18e-0c42-46a6-a584-58f450d0eb95","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":16,"up_thru":17,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6800","nonce":3187441749},{"type":"v1","addr":"192.168.123.104:6801","nonce":3187441749}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6802","nonce":3187441749},{"type":"v1","addr":"192.168.123.104:6803","nonce":3187441749}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6806","nonce":3187441749},{"type":"v1","addr":"192.168.123.104:6807","nonce":3187441749}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6804","nonce":3187441749},{"type":"v1","addr":"192.168.123.104:6805","nonce":3187441749}]},"public_addr":"192.168.123.104:6801/3187441749","cluster_addr":"192.168.123.104:6803/3187441749","heartbeat_back_addr":"192.168.123.104:6807/3187441749","heartbeat_front_addr":"192.168.123.104:6805/3187441749","state":["exists","up"]},{"osd":3,"uuid":"155a69d2-3a90-4797-870f-0d55995439d5","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_fr
om":22,"up_thru":34,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6808","nonce":1533780217},{"type":"v1","addr":"192.168.123.104:6809","nonce":1533780217}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6810","nonce":1533780217},{"type":"v1","addr":"192.168.123.104:6811","nonce":1533780217}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6814","nonce":1533780217},{"type":"v1","addr":"192.168.123.104:6815","nonce":1533780217}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6812","nonce":1533780217},{"type":"v1","addr":"192.168.123.104:6813","nonce":1533780217}]},"public_addr":"192.168.123.104:6809/1533780217","cluster_addr":"192.168.123.104:6811/1533780217","heartbeat_back_addr":"192.168.123.104:6815/1533780217","heartbeat_front_addr":"192.168.123.104:6813/1533780217","state":["exists","up"]},{"osd":4,"uuid":"654075eb-fb5d-47cc-abb4-791f434384d1","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":28,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6800","nonce":3003059232},{"type":"v1","addr":"192.168.123.107:6801","nonce":3003059232}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6802","nonce":3003059232},{"type":"v1","addr":"192.168.123.107:6803","nonce":3003059232}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6806","nonce":3003059232},{"type":"v1","addr":"192.168.123.107:6807","nonce":3003059232}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6804","nonce":3003059232},{"type":"v1","addr":"192.168.123.107:6805","nonce":3003059232}]},"public_addr":"192.168.123.107:6801/3003059232","cluster_addr":"192.168.123.107:6803/3003059232","heartbeat_back_addr":"192.168.123.107:6807/3003059232","heartbeat_front_addr":"192.168.123.107:6805/3003059232","state":["exists","up"]},{"osd":5,"uuid":"9dec1d82-be8f-40f2-9e9a-cbb3b644be63","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":33,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6808","nonce":1009376405},{"type":"v1","addr":"192.168.123.107:6809","nonce":1009376405}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6810","nonce":1009376405},{"type":"v1","addr":"192.168.123.107:6811","nonce":1009376405}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6814","nonce":1009376405},{"type":"v1","addr":"192.168.123.107:6815","nonce":1009376405}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6812","nonce":1009376405},{"type":"v1","addr":"192.168.123.107:6813","nonce":1009376405}]},"public_addr":"192.168.123.107:6809/1009376405","cluster_addr":"192.168.123.107:6811/1009376405","heartbeat_back_addr":"192.168.123.107:6815/1009376405","heartbeat_front_addr":"192.168.123.107:6813/1009376405","state":["exists","up"]}],"osd_xinfo":[{"osd":0,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-07T10:15:17.245690+0000","dead_epoch":0},{"osd":1,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-07T10:15:29.032199+0000","dead_epoch":0},{"osd":2,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"
old_weight":0,"last_purged_snaps_scrub":"2026-03-07T10:15:40.274573+0000","dead_epoch":0},{"osd":3,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-07T10:15:53.414722+0000","dead_epoch":0},{"osd":4,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-07T10:16:06.017105+0000","dead_epoch":0},{"osd":5,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-07T10:16:17.380660+0000","dead_epoch":0}],"pg_upmap":[],"pg_upmap_items":[],"pg_upmap_primaries":[],"pg_temp":[],"primary_temp":[],"blocklist":{"192.168.123.101:0/2057860323":"2026-03-08T10:14:31.637587+0000","192.168.123.101:6801/121993066":"2026-03-08T10:14:31.637587+0000","192.168.123.101:6800/121993066":"2026-03-08T10:14:31.637587+0000","192.168.123.101:0/3859697181":"2026-03-08T10:14:31.637587+0000","192.168.123.101:6800/1072916292":"2026-03-08T10:14:13.780842+0000","192.168.123.101:0/748929155":"2026-03-08T10:14:31.637587+0000","192.168.123.101:0/783816649":"2026-03-08T10:14:13.780842+0000","192.168.123.101:6801/1072916292":"2026-03-08T10:14:13.780842+0000","192.168.123.101:0/2002072881":"2026-03-08T10:14:13.780842+0000","192.168.123.101:0/3516706839":"2026-03-08T10:14:13.780842+0000"},"range_blocklist":{},"erasure_code_profiles":{"default":{"crush-failure-domain":"osd","k":"2","m":"1","plugin":"jerasure","technique":"reed_sol_van"}},"removed_snaps_queue":[],"new_removed_snaps":[],"new_purged_snaps":[],"crush_node_flags":{},"device_class_flags":{},"stretch_mode":{"stretch_mode_enabled":false,"stretch_bucket_count":0,"degraded_stretch_mode":0,"recovering_stretch_mode":0,"stretch_mode_bucket":0}} 2026-03-07T10:16:21.341 INFO:tasks.cephadm.ceph_manager.ceph:[{'pool': 1, 'pool_name': '.mgr', 'create_time': '2026-03-07T10:15:41.708136+0000', 'flags': 1, 'flags_names': 'hashpspool', 'type': 1, 'size': 3, 'min_size': 2, 'crush_rule': 0, 'peering_crush_bucket_count': 0, 'peering_crush_bucket_target': 0, 'peering_crush_bucket_barrier': 0, 'peering_crush_bucket_mandatory_member': 2147483647, 'is_stretch_pool': False, 'object_hash': 2, 'pg_autoscale_mode': 'off', 'pg_num': 1, 'pg_placement_num': 1, 'pg_placement_num_target': 1, 'pg_num_target': 1, 'pg_num_pending': 1, 'last_pg_merge_meta': {'source_pgid': '0.0', 'ready_epoch': 0, 'last_epoch_started': 0, 'last_epoch_clean': 0, 'source_version': "0'0", 'target_version': "0'0"}, 'last_change': '19', 'last_force_op_resend': '0', 'last_force_op_resend_prenautilus': '0', 'last_force_op_resend_preluminous': '0', 'auid': 0, 'snap_mode': 'selfmanaged', 'snap_seq': 0, 'snap_epoch': 0, 'pool_snaps': [], 'removed_snaps': '[]', 'quota_max_bytes': 0, 'quota_max_objects': 0, 'tiers': [], 'tier_of': -1, 'read_tier': -1, 'write_tier': -1, 'cache_mode': 'none', 'target_max_bytes': 0, 'target_max_objects': 0, 'cache_target_dirty_ratio_micro': 400000, 'cache_target_dirty_high_ratio_micro': 600000, 'cache_target_full_ratio_micro': 800000, 'cache_min_flush_age': 0, 'cache_min_evict_age': 0, 'erasure_code_profile': '', 'hit_set_params': {'type': 'none'}, 'hit_set_period': 0, 'hit_set_count': 0, 'use_gmt_hitset': True, 'min_read_recency_for_promote': 0, 'min_write_recency_for_promote': 0, 'hit_set_grade_decay_rate': 0, 'hit_set_search_last_n': 0, 'grade_table': [], 'stripe_width': 0, 'expected_num_objects': 0, 'fast_read': False, 'options': 
{'pg_num_max': 32, 'pg_num_min': 1}, 'application_metadata': {'mgr': {}}, 'read_balance': {'score_type': 'Fair distribution', 'score_acting': 6, 'score_stable': 6, 'optimal_score': 0.5, 'raw_score_acting': 3, 'raw_score_stable': 3, 'primary_affinity_weighted': 1, 'average_primary_affinity': 1, 'average_primary_affinity_weighted': 1}}] 2026-03-07T10:16:21.341 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid 3fd6e214-1a0e-11f1-b256-99cfc35f3328 -- ceph osd pool get .mgr pg_num 2026-03-07T10:16:21.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:20 vm04 ceph-mon[49935]: pgmap v77: 1 pgs: 1 active+clean; 449 KiB data, 160 MiB used, 120 GiB / 120 GiB avail; 75 KiB/s, 0 objects/s recovering 2026-03-07T10:16:21.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:20 vm04 ceph-mon[49935]: osdmap e34: 6 total, 6 up, 6 in 2026-03-07T10:16:21.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:20 vm04 ceph-mon[49935]: from='client.? 192.168.123.101:0/2112459464' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-07T10:16:21.432 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:20 vm07 ceph-mon[68568]: pgmap v77: 1 pgs: 1 active+clean; 449 KiB data, 160 MiB used, 120 GiB / 120 GiB avail; 75 KiB/s, 0 objects/s recovering 2026-03-07T10:16:21.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:20 vm07 ceph-mon[68568]: osdmap e34: 6 total, 6 up, 6 in 2026-03-07T10:16:21.433 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:20 vm07 ceph-mon[68568]: from='client.? 192.168.123.101:0/2112459464' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-07T10:16:21.519 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/mon.a/config 2026-03-07T10:16:21.866 INFO:teuthology.orchestra.run.vm01.stdout:pg_num: 1 2026-03-07T10:16:21.940 INFO:tasks.cephadm:Setting up client nodes... 2026-03-07T10:16:21.941 INFO:tasks.ceph:Waiting until ceph daemons up and pgs clean... 2026-03-07T10:16:21.941 INFO:tasks.cephadm.ceph_manager.ceph:waiting for mgr available 2026-03-07T10:16:21.941 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid 3fd6e214-1a0e-11f1-b256-99cfc35f3328 -- ceph mgr dump --format=json 2026-03-07T10:16:22.130 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/mon.a/config 2026-03-07T10:16:22.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:22 vm01 ceph-mon[49602]: osdmap e35: 6 total, 6 up, 6 in 2026-03-07T10:16:22.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:22 vm01 ceph-mon[49602]: from='client.? 192.168.123.101:0/2864041644' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-07T10:16:22.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:22 vm01 ceph-mon[49602]: pgmap v80: 1 pgs: 1 remapped+peering; 449 KiB data, 160 MiB used, 120 GiB / 120 GiB avail 2026-03-07T10:16:22.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:22 vm01 ceph-mon[49602]: from='client.? 
192.168.123.101:0/2513180051' entity='client.admin' cmd=[{"prefix": "osd pool get", "pool": ".mgr", "var": "pg_num"}]: dispatch 2026-03-07T10:16:22.504 INFO:teuthology.orchestra.run.vm01.stdout: 2026-03-07T10:16:22.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:22 vm04 ceph-mon[49935]: osdmap e35: 6 total, 6 up, 6 in 2026-03-07T10:16:22.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:22 vm04 ceph-mon[49935]: from='client.? 192.168.123.101:0/2864041644' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-07T10:16:22.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:22 vm04 ceph-mon[49935]: pgmap v80: 1 pgs: 1 remapped+peering; 449 KiB data, 160 MiB used, 120 GiB / 120 GiB avail 2026-03-07T10:16:22.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:22 vm04 ceph-mon[49935]: from='client.? 192.168.123.101:0/2513180051' entity='client.admin' cmd=[{"prefix": "osd pool get", "pool": ".mgr", "var": "pg_num"}]: dispatch 2026-03-07T10:16:22.682 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:22 vm07 ceph-mon[68568]: osdmap e35: 6 total, 6 up, 6 in 2026-03-07T10:16:22.682 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:22 vm07 ceph-mon[68568]: from='client.? 192.168.123.101:0/2864041644' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-07T10:16:22.682 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:22 vm07 ceph-mon[68568]: pgmap v80: 1 pgs: 1 remapped+peering; 449 KiB data, 160 MiB used, 120 GiB / 120 GiB avail 2026-03-07T10:16:22.682 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:22 vm07 ceph-mon[68568]: from='client.? 192.168.123.101:0/2513180051' entity='client.admin' cmd=[{"prefix": "osd pool get", "pool": ".mgr", "var": "pg_num"}]: dispatch 2026-03-07T10:16:22.878 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":15,"flags":0,"active_gid":14156,"active_name":"a","active_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6800","nonce":2746025301},{"type":"v1","addr":"192.168.123.101:6801","nonce":2746025301}]},"active_addr":"192.168.123.101:6801/2746025301","active_change":"2026-03-07T10:14:31.637685+0000","active_mgr_features":4540701547738038271,"available":true,"standbys":[{"gid":24103,"name":"b","mgr_features":4540701547738038271,"available_modules":[{"name":"alerts","can_run":true,"error_string":"","module_options":{"interval":{"name":"interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"How frequently to reexamine health 
status","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"smtp_destination":{"name":"smtp_destination","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Email address to send alerts to","long_desc":"","tags":[],"see_also":[]},"smtp_from_name":{"name":"smtp_from_name","type":"str","level":"advanced","flags":1,"default_value":"Ceph","min":"","max":"","enum_allowed":[],"desc":"Email From: name","long_desc":"","tags":[],"see_also":[]},"smtp_host":{"name":"smtp_host","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_password":{"name":"smtp_password","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Password to authenticate with","long_desc":"","tags":[],"see_also":[]},"smtp_port":{"name":"smtp_port","type":"int","level":"advanced","flags":1,"default_value":"465","min":"","max":"","enum_allowed":[],"desc":"SMTP port","long_desc":"","tags":[],"see_also":[]},"smtp_sender":{"name":"smtp_sender","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP envelope sender","long_desc":"","tags":[],"see_also":[]},"smtp_ssl":{"name":"smtp_ssl","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Use SSL to connect to SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_user":{"name":"smtp_user","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"User to authenticate as","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"balancer","can_run":true,"error_string":"","module_options":{"active":{"name":"active","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"automatically balance PGs across cluster","long_desc":"","tags":[],"see_also":[]},"begin_time":{"name":"begin_time","type":"str","level":"advanced","flags":1,"default_value":"0000","min":"","max":"","enum_allowed":[],"desc":"beginning time of day to automatically balance","long_desc":"This is a time of day in the format HHMM.","tags":[],"see_also":[]},"begin_weekday":{"name":"begin_weekday","type":"uint","level":"advanced","flags":1,"default_value":"0","min":"0","max":"6","enum_allowed":[],"desc":"Restrict automatic balancing to this day of the week or later","long_desc":"0 = Sunday, 1 = 
Monday, etc.","tags":[],"see_also":[]},"crush_compat_max_iterations":{"name":"crush_compat_max_iterations","type":"uint","level":"advanced","flags":1,"default_value":"25","min":"1","max":"250","enum_allowed":[],"desc":"maximum number of iterations to attempt optimization","long_desc":"","tags":[],"see_also":[]},"crush_compat_metrics":{"name":"crush_compat_metrics","type":"str","level":"advanced","flags":1,"default_value":"pgs,objects,bytes","min":"","max":"","enum_allowed":[],"desc":"metrics with which to calculate OSD utilization","long_desc":"Value is a list of one or more of \"pgs\", \"objects\", or \"bytes\", and indicates which metrics to use to balance utilization.","tags":[],"see_also":[]},"crush_compat_step":{"name":"crush_compat_step","type":"float","level":"advanced","flags":1,"default_value":"0.5","min":"0.001","max":"0.999","enum_allowed":[],"desc":"aggressiveness of optimization","long_desc":".99 is very aggressive, .01 is less aggressive","tags":[],"see_also":[]},"end_time":{"name":"end_time","type":"str","level":"advanced","flags":1,"default_value":"2359","min":"","max":"","enum_allowed":[],"desc":"ending time of day to automatically balance","long_desc":"This is a time of day in the format HHMM.","tags":[],"see_also":[]},"end_weekday":{"name":"end_weekday","type":"uint","level":"advanced","flags":1,"default_value":"0","min":"0","max":"6","enum_allowed":[],"desc":"Restrict automatic balancing to days of the week earlier than this","long_desc":"0 = Sunday, 1 = Monday, etc.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_score":{"name":"min_score","type":"float","level":"advanced","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"minimum score, below which no optimization is attempted","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":1,"default_value":"upmap","min":"","max":"","enum_allowed":["crush-compat","none","read","upmap","upmap-read"],"desc":"Balancer mode","long_desc":"","tags":[],"see_also":[]},"pool_ids":{"name":"pool_ids","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"pools which the automatic balancing will be limited to","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and attempt 
optimization","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"update_pg_upmap_activity":{"name":"update_pg_upmap_activity","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Updates pg_upmap activity stats to be used in `balancer status detail`","long_desc":"","tags":[],"see_also":[]},"upmap_max_deviation":{"name":"upmap_max_deviation","type":"int","level":"advanced","flags":1,"default_value":"5","min":"1","max":"","enum_allowed":[],"desc":"deviation below which no optimization is attempted","long_desc":"If the number of PGs are within this count then no optimization is attempted","tags":[],"see_also":[]},"upmap_max_optimizations":{"name":"upmap_max_optimizations","type":"uint","level":"advanced","flags":1,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"maximum upmap optimizations to make per attempt","long_desc":"","tags":[],"see_also":[]}}},{"name":"cephadm","can_run":true,"error_string":"","module_options":{"agent_down_multiplier":{"name":"agent_down_multiplier","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"","max":"","enum_allowed":[],"desc":"Multiplied by agent refresh rate to calculate how long agent must not report before being marked down","long_desc":"","tags":[],"see_also":[]},"agent_refresh_rate":{"name":"agent_refresh_rate","type":"secs","level":"advanced","flags":0,"default_value":"20","min":"","max":"","enum_allowed":[],"desc":"How often agent on each host will try to gather and send metadata","long_desc":"","tags":[],"see_also":[]},"agent_starting_port":{"name":"agent_starting_port","type":"int","level":"advanced","flags":0,"default_value":"4721","min":"","max":"","enum_allowed":[],"desc":"First port agent will try to bind to (will also try up to next 1000 subsequent ports if blocked)","long_desc":"","tags":[],"see_also":[]},"allow_ptrace":{"name":"allow_ptrace","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow SYS_PTRACE capability on ceph containers","long_desc":"The SYS_PTRACE capability is needed to attach to a process with gdb or strace. 
Enabling this options can allow debugging daemons that encounter problems at runtime.","tags":[],"see_also":[]},"autotune_interval":{"name":"autotune_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to autotune daemon memory","long_desc":"","tags":[],"see_also":[]},"autotune_memory_target_ratio":{"name":"autotune_memory_target_ratio","type":"float","level":"advanced","flags":0,"default_value":"0.7","min":"","max":"","enum_allowed":[],"desc":"ratio of total system memory to divide amongst autotuned daemons","long_desc":"","tags":[],"see_also":[]},"cephadm_log_destination":{"name":"cephadm_log_destination","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":["file","file,syslog","syslog"],"desc":"Destination for cephadm command's persistent logging","long_desc":"","tags":[],"see_also":[]},"cgroups_split":{"name":"cgroups_split","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Pass --cgroups=split when cephadm creates containers (currently podman only)","long_desc":"","tags":[],"see_also":[]},"config_checks_enabled":{"name":"config_checks_enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable or disable the cephadm configuration analysis","long_desc":"","tags":[],"see_also":[]},"config_dashboard":{"name":"config_dashboard","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"manage configs like API endpoints in Dashboard.","long_desc":"","tags":[],"see_also":[]},"container_image_alertmanager":{"name":"container_image_alertmanager","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/alertmanager:v0.25.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_base":{"name":"container_image_base","type":"str","level":"advanced","flags":1,"default_value":"quay.io/ceph/ceph","min":"","max":"","enum_allowed":[],"desc":"Container image name, without the tag","long_desc":"","tags":[],"see_also":[]},"container_image_elasticsearch":{"name":"container_image_elasticsearch","type":"str","level":"advanced","flags":0,"default_value":"quay.io/omrizeneva/elasticsearch:6.8.23","min":"","max":"","enum_allowed":[],"desc":"elasticsearch container image","long_desc":"","tags":[],"see_also":[]},"container_image_grafana":{"name":"container_image_grafana","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/grafana:10.4.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_haproxy":{"name":"container_image_haproxy","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/haproxy:2.3","min":"","max":"","enum_allowed":[],"desc":"HAproxy container image","long_desc":"","tags":[],"see_also":[]},"container_image_jaeger_agent":{"name":"container_image_jaeger_agent","type":"str","level":"advanced","flags":0,"default_value":"quay.io/jaegertracing/jaeger-agent:1.29","min":"","max":"","enum_allowed":[],"desc":"Jaeger agent container image","long_desc":"","tags":[],"see_also":[]},"container_image_jaeger_collector":{"name":"container_image_jaeger_collector","type":"str","level":"advanced","flags":0,"default_value":"quay.io/jaegertracing/jaeger-collector:1.29","min":"","max":"","enum_allowed":[],"desc":"Jaeger 
collector container image","long_desc":"","tags":[],"see_also":[]},"container_image_jaeger_query":{"name":"container_image_jaeger_query","type":"str","level":"advanced","flags":0,"default_value":"quay.io/jaegertracing/jaeger-query:1.29","min":"","max":"","enum_allowed":[],"desc":"Jaeger query container image","long_desc":"","tags":[],"see_also":[]},"container_image_keepalived":{"name":"container_image_keepalived","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/keepalived:2.2.4","min":"","max":"","enum_allowed":[],"desc":"Keepalived container image","long_desc":"","tags":[],"see_also":[]},"container_image_loki":{"name":"container_image_loki","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/loki:3.0.0","min":"","max":"","enum_allowed":[],"desc":"Loki container image","long_desc":"","tags":[],"see_also":[]},"container_image_node_exporter":{"name":"container_image_node_exporter","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/node-exporter:v1.7.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_nvmeof":{"name":"container_image_nvmeof","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/nvmeof:1.2.5","min":"","max":"","enum_allowed":[],"desc":"Nvme-of container image","long_desc":"","tags":[],"see_also":[]},"container_image_prometheus":{"name":"container_image_prometheus","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/prometheus:v2.51.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_promtail":{"name":"container_image_promtail","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/promtail:3.0.0","min":"","max":"","enum_allowed":[],"desc":"Promtail container image","long_desc":"","tags":[],"see_also":[]},"container_image_samba":{"name":"container_image_samba","type":"str","level":"advanced","flags":0,"default_value":"quay.io/samba.org/samba-server:devbuilds-centos-amd64","min":"","max":"","enum_allowed":[],"desc":"Samba/SMB container image","long_desc":"","tags":[],"see_also":[]},"container_image_snmp_gateway":{"name":"container_image_snmp_gateway","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/snmp-notifier:v1.2.1","min":"","max":"","enum_allowed":[],"desc":"SNMP Gateway container image","long_desc":"","tags":[],"see_also":[]},"container_init":{"name":"container_init","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Run podman/docker with `--init`","long_desc":"","tags":[],"see_also":[]},"daemon_cache_timeout":{"name":"daemon_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"seconds to cache service (daemon) inventory","long_desc":"","tags":[],"see_also":[]},"default_cephadm_command_timeout":{"name":"default_cephadm_command_timeout","type":"int","level":"advanced","flags":0,"default_value":"900","min":"","max":"","enum_allowed":[],"desc":"Default timeout applied to cephadm commands run directly on the host (in seconds)","long_desc":"","tags":[],"see_also":[]},"default_registry":{"name":"default_registry","type":"str","level":"advanced","flags":0,"default_value":"quay.io","min":"","max":"","enum_allowed":[],"desc":"Search-registry to which we should normalize unqualified image names. 
This is not the default registry","long_desc":"","tags":[],"see_also":[]},"device_cache_timeout":{"name":"device_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"1800","min":"","max":"","enum_allowed":[],"desc":"seconds to cache device inventory","long_desc":"","tags":[],"see_also":[]},"device_enhanced_scan":{"name":"device_enhanced_scan","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use libstoragemgmt during device scans","long_desc":"","tags":[],"see_also":[]},"facts_cache_timeout":{"name":"facts_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"seconds to cache host facts data","long_desc":"","tags":[],"see_also":[]},"grafana_dashboards_path":{"name":"grafana_dashboards_path","type":"str","level":"advanced","flags":0,"default_value":"/etc/grafana/dashboards/ceph-dashboard/","min":"","max":"","enum_allowed":[],"desc":"location of dashboards to include in grafana deployments","long_desc":"","tags":[],"see_also":[]},"host_check_interval":{"name":"host_check_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to perform a host check","long_desc":"","tags":[],"see_also":[]},"hw_monitoring":{"name":"hw_monitoring","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Deploy hw monitoring daemon on every host.","long_desc":"","tags":[],"see_also":[]},"inventory_list_all":{"name":"inventory_list_all","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Whether ceph-volume inventory should report more devices (mostly mappers (LVs / mpaths), partitions...)","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_refresh_metadata":{"name":"log_refresh_metadata","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Log all refresh metadata. Includes daemon, device, and host info collected regularly. 
Only has effect if logging at debug level","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"log to the \"cephadm\" cluster log channel\"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf":{"name":"manage_etc_ceph_ceph_conf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Manage and own /etc/ceph/ceph.conf on the hosts.","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf_hosts":{"name":"manage_etc_ceph_ceph_conf_hosts","type":"str","level":"advanced","flags":0,"default_value":"*","min":"","max":"","enum_allowed":[],"desc":"PlacementSpec describing on which hosts to manage /etc/ceph/ceph.conf","long_desc":"","tags":[],"see_also":[]},"max_count_per_host":{"name":"max_count_per_host","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of daemons per service per host","long_desc":"","tags":[],"see_also":[]},"max_osd_draining_count":{"name":"max_osd_draining_count","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of osds that will be drained simultaneously when osds are removed","long_desc":"","tags":[],"see_also":[]},"migration_current":{"name":"migration_current","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"internal - do not modify","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":0,"default_value":"root","min":"","max":"","enum_allowed":["cephadm-package","root"],"desc":"mode for remote execution of cephadm","long_desc":"","tags":[],"see_also":[]},"oob_default_addr":{"name":"oob_default_addr","type":"str","level":"advanced","flags":0,"default_value":"169.254.1.1","min":"","max":"","enum_allowed":[],"desc":"Default address for RedFish API (oob management).","long_desc":"","tags":[],"see_also":[]},"prometheus_alerts_path":{"name":"prometheus_alerts_path","type":"str","level":"advanced","flags":0,"default_value":"/etc/prometheus/ceph/ceph_default_alerts.yml","min":"","max":"","enum_allowed":[],"desc":"location of alerts to include in prometheus deployments","long_desc":"","tags":[],"see_also":[]},"registry_insecure":{"name":"registry_insecure","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Registry is to be considered insecure (no TLS available). Only for development purposes.","long_desc":"","tags":[],"see_also":[]},"registry_password":{"name":"registry_password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository password. 
Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"registry_url":{"name":"registry_url","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Registry url for login purposes. This is not the default registry","long_desc":"","tags":[],"see_also":[]},"registry_username":{"name":"registry_username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository username. Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"secure_monitoring_stack":{"name":"secure_monitoring_stack","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable TLS security for all the monitoring stack daemons","long_desc":"","tags":[],"see_also":[]},"service_discovery_port":{"name":"service_discovery_port","type":"int","level":"advanced","flags":0,"default_value":"8765","min":"","max":"","enum_allowed":[],"desc":"cephadm service discovery port","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssh_config_file":{"name":"ssh_config_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"customized SSH config file to connect to managed hosts","long_desc":"","tags":[],"see_also":[]},"ssh_keepalive_count_max":{"name":"ssh_keepalive_count_max","type":"int","level":"advanced","flags":0,"default_value":"3","min":"","max":"","enum_allowed":[],"desc":"How many times ssh connections can fail liveness checks before the host is marked offline","long_desc":"","tags":[],"see_also":[]},"ssh_keepalive_interval":{"name":"ssh_keepalive_interval","type":"int","level":"advanced","flags":0,"default_value":"7","min":"","max":"","enum_allowed":[],"desc":"How often ssh connections are checked for liveness","long_desc":"","tags":[],"see_also":[]},"use_agent":{"name":"use_agent","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use cephadm agent on each host to gather and send metadata","long_desc":"","tags":[],"see_also":[]},"use_repo_digest":{"name":"use_repo_digest","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Automatically convert image tags to image digest. 
Make sure all daemons use the same image","long_desc":"","tags":[],"see_also":[]},"warn_on_failed_host_check":{"name":"warn_on_failed_host_check","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if the host check fails","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_daemons":{"name":"warn_on_stray_daemons","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected that are not managed by cephadm","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_hosts":{"name":"warn_on_stray_hosts","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected on a host that is not managed by cephadm","long_desc":"","tags":[],"see_also":[]}}},{"name":"crash","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"retain_interval":{"name":"retain_interval","type":"secs","level":"advanced","flags":1,"default_value":"31536000","min":"","max":"","enum_allowed":[],"desc":"how long to retain crashes before pruning them","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"warn_recent_interval":{"name":"warn_recent_interval","type":"secs","level":"advanced","flags":1,"default_value":"1209600","min":"","max":"","enum_allowed":[],"desc":"time interval in which to warn about recent 
crashes","long_desc":"","tags":[],"see_also":[]}}},{"name":"dashboard","can_run":true,"error_string":"","module_options":{"ACCOUNT_LOCKOUT_ATTEMPTS":{"name":"ACCOUNT_LOCKOUT_ATTEMPTS","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_HOST":{"name":"ALERTMANAGER_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_SSL_VERIFY":{"name":"ALERTMANAGER_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_ENABLED":{"name":"AUDIT_API_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_LOG_PAYLOAD":{"name":"AUDIT_API_LOG_PAYLOAD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ENABLE_BROWSABLE_API":{"name":"ENABLE_BROWSABLE_API","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_CEPHFS":{"name":"FEATURE_TOGGLE_CEPHFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_DASHBOARD":{"name":"FEATURE_TOGGLE_DASHBOARD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_ISCSI":{"name":"FEATURE_TOGGLE_ISCSI","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_MIRRORING":{"name":"FEATURE_TOGGLE_MIRRORING","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_NFS":{"name":"FEATURE_TOGGLE_NFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RBD":{"name":"FEATURE_TOGGLE_RBD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RGW":{"name":"FEATURE_TOGGLE_RGW","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE":{"name":"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_PASSWORD":{"name":"GRAFANA_API_PASSWORD","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_SSL_VERIFY":{"name":"GRAFANA_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_URL":{"name":"GRAFANA_API_URL","type":"str","level":"advanced","flags":0
,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_USERNAME":{"name":"GRAFANA_API_USERNAME","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_FRONTEND_API_URL":{"name":"GRAFANA_FRONTEND_API_URL","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_UPDATE_DASHBOARDS":{"name":"GRAFANA_UPDATE_DASHBOARDS","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISCSI_API_SSL_VERIFICATION":{"name":"ISCSI_API_SSL_VERIFICATION","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISSUE_TRACKER_API_KEY":{"name":"ISSUE_TRACKER_API_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_HOST":{"name":"PROMETHEUS_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_SSL_VERIFY":{"name":"PROMETHEUS_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_COMPLEXITY_ENABLED":{"name":"PWD_POLICY_CHECK_COMPLEXITY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED":{"name":"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_LENGTH_ENABLED":{"name":"PWD_POLICY_CHECK_LENGTH_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_OLDPWD_ENABLED":{"name":"PWD_POLICY_CHECK_OLDPWD_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_USERNAME_ENABLED":{"name":"PWD_POLICY_CHECK_USERNAME_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_ENABLED":{"name":"PWD_POLICY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_EXCLUSION_LIST":{"name":"PWD_POLICY_EXCLUSION_LIST","type":"str","level":"advanced","flags":0,"def
ault_value":"osd,host,dashboard,pool,block,nfs,ceph,monitors,gateway,logs,crush,maps","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_COMPLEXITY":{"name":"PWD_POLICY_MIN_COMPLEXITY","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_LENGTH":{"name":"PWD_POLICY_MIN_LENGTH","type":"int","level":"advanced","flags":0,"default_value":"8","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"REST_REQUESTS_TIMEOUT":{"name":"REST_REQUESTS_TIMEOUT","type":"int","level":"advanced","flags":0,"default_value":"45","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ACCESS_KEY":{"name":"RGW_API_ACCESS_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ADMIN_RESOURCE":{"name":"RGW_API_ADMIN_RESOURCE","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SECRET_KEY":{"name":"RGW_API_SECRET_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SSL_VERIFY":{"name":"RGW_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_SPAN":{"name":"USER_PWD_EXPIRATION_SPAN","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_1":{"name":"USER_PWD_EXPIRATION_WARNING_1","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_2":{"name":"USER_PWD_EXPIRATION_WARNING_2","type":"int","level":"advanced","flags":0,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"cross_origin_url":{"name":"cross_origin_url","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"crt_file":{"name":"crt_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"debug":{"name":"debug","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable/disable debug 
options","long_desc":"","tags":[],"see_also":[]},"jwt_token_ttl":{"name":"jwt_token_ttl","type":"int","level":"advanced","flags":0,"default_value":"28800","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"motd":{"name":"motd","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"The message of the day","long_desc":"","tags":[],"see_also":[]},"redirect_resolve_ip_addr":{"name":"redirect_resolve_ip_addr","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":0,"default_value":"8080","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl_server_port":{"name":"ssl_server_port","type":"int","level":"advanced","flags":0,"default_value":"8443","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":0,"default_value":"redirect","min":"","max":"","enum_allowed":["error","redirect"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":0,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url_prefix":{"name":"url_prefix","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"devicehealth","can_run":true,"error_string":"","module_options":{"enable_monitoring":{"name":"enable_monitoring","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"monitor device health 
metrics","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mark_out_threshold":{"name":"mark_out_threshold","type":"secs","level":"advanced","flags":1,"default_value":"2419200","min":"","max":"","enum_allowed":[],"desc":"automatically mark OSD if it may fail before this long","long_desc":"","tags":[],"see_also":[]},"pool_name":{"name":"pool_name","type":"str","level":"advanced","flags":1,"default_value":"device_health_metrics","min":"","max":"","enum_allowed":[],"desc":"name of pool in which to store device health metrics","long_desc":"","tags":[],"see_also":[]},"retention_period":{"name":"retention_period","type":"secs","level":"advanced","flags":1,"default_value":"15552000","min":"","max":"","enum_allowed":[],"desc":"how long to retain device health metrics","long_desc":"","tags":[],"see_also":[]},"scrape_frequency":{"name":"scrape_frequency","type":"secs","level":"advanced","flags":1,"default_value":"86400","min":"","max":"","enum_allowed":[],"desc":"how frequently to scrape device health metrics","long_desc":"","tags":[],"see_also":[]},"self_heal":{"name":"self_heal","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"preemptively heal cluster around devices that may fail","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and check device health","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"warn_threshold":{"name":"warn_threshold","type":"secs","level":"advanced","flags":1,"default_value":"7257600","min":"","max":"","enum_allowed":[],"desc":"raise health warning if OSD may fail before this long","long_desc":"","tags":[],"see_also":[]}}},{"name":"influx","can_run":false,"error_string":"influxdb python module not found","module_options":{"batch_size":{"name":"batch_size","type":"int","level":"advanced","flags":0,"default_value":"5000","min":"","max":"","enum_allowed":[],"desc":"How big batches of data points should be when sending to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"database":{"name":"database","type":"str","level":"advanced","flags":0,"default_value":"ceph","min":"","max":"","enum_allowed":[],"desc":"InfluxDB database name. 
You will need to create this database and grant write privileges to the configured username or the username must have admin privileges to create it.","long_desc":"","tags":[],"see_also":[]},"hostname":{"name":"hostname","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server hostname","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"30","min":"5","max":"","enum_allowed":[],"desc":"Time between reports to InfluxDB. Default 30 seconds.","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"password":{"name":"password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"password of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"port":{"name":"port","type":"int","level":"advanced","flags":0,"default_value":"8086","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server port","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"str","level":"advanced","flags":0,"default_value":"false","min":"","max":"","enum_allowed":[],"desc":"Use https connection for InfluxDB server. Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]},"threads":{"name":"threads","type":"int","level":"advanced","flags":0,"default_value":"5","min":"1","max":"32","enum_allowed":[],"desc":"How many worker threads should be spawned for sending data to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"username":{"name":"username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"username of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"verify_ssl":{"name":"verify_ssl","type":"str","level":"advanced","flags":0,"default_value":"true","min":"","max":"","enum_allowed":[],"desc":"Verify https cert for InfluxDB server. 
Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]}}},{"name":"insights","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"iostat","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"k8sevents","can_run":true,"error_string":"","module_options":{"ceph_event_retention_days":{"name":"ceph_event_retention_days","type":"int","level":"advanced","flags":0,"default_value":"7","min":"","max":"","enum_allowed":[],"desc":"Days to hold ceph event information within local cache","long_desc":"","tags":[],"see_also":[]},"config_check_secs":{"name":"config_check_secs","type":"int","level":"advanced","flags":0,"default_value":"10","min":"10","max":"","enum_allowed":[],"desc":"interval (secs) to check for cluster configuration 
changes","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"localpool","can_run":true,"error_string":"","module_options":{"failure_domain":{"name":"failure_domain","type":"str","level":"advanced","flags":1,"default_value":"host","min":"","max":"","enum_allowed":[],"desc":"failure domain for any created local pool","long_desc":"what failure domain we should separate data replicas across.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_size":{"name":"min_size","type":"int","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"default min_size for any created local pool","long_desc":"value to set min_size to (unchanged from Ceph's default if this option is not set)","tags":[],"see_also":[]},"num_rep":{"name":"num_rep","type":"int","level":"advanced","flags":1,"default_value":"3","min":"","max":"","enum_allowed":[],"desc":"default replica count for any created local pool","long_desc":"","tags":[],"see_also":[]},"pg_num":{"name":"pg_num","type":"int","level":"advanced","flags":1,"default_value":"128","min":"","max":"","enum_allowed":[],"desc":"default pg_num for any created local pool","long_desc":"","tags":[],"see_also":[]},"prefix":{"name":"prefix","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"name prefix for any created local 
pool","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"subtree":{"name":"subtree","type":"str","level":"advanced","flags":1,"default_value":"rack","min":"","max":"","enum_allowed":[],"desc":"CRUSH level for which to create a local pool","long_desc":"which CRUSH subtree type the module should create a pool for.","tags":[],"see_also":[]}}},{"name":"mds_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"mirroring","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"nfs","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":
"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"orchestrator","can_run":true,"error_string":"","module_options":{"fail_fs":{"name":"fail_fs","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Fail filesystem for rapid multi-rank mds upgrade","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"orchestrator":{"name":"orchestrator","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["cephadm","rook","test_orchestrator"],"desc":"Orchestrator backend","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_perf_query","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[
]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"pg_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"threshold":{"name":"threshold","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"1.0","max":"","enum_allowed":[],"desc":"scaling threshold","long_desc":"The factor by which the `NEW PG_NUM` must vary from the current`PG_NUM` before being accepted. 
Cannot be less than 1.0","tags":[],"see_also":[]}}},{"name":"progress","can_run":true,"error_string":"","module_options":{"allow_pg_recovery_event":{"name":"allow_pg_recovery_event","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow the module to show pg recovery progress","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_completed_events":{"name":"max_completed_events","type":"int","level":"advanced","flags":1,"default_value":"50","min":"","max":"","enum_allowed":[],"desc":"number of past completed events to remember","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"how long the module is going to sleep","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"prometheus","can_run":true,"error_string":"","module_options":{"cache":{"name":"cache","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"exclude_perf_counters":{"name":"exclude_perf_counters","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Do not include perf-counters in the metrics output","long_desc":"Gathering perf-counters from a single Prometheus exporter can degrade ceph-mgr performance, especially in large clusters. Instead, Ceph-exporter daemons are now used by default for perf-counter gathering. 
This should only be disabled when no ceph-exporters are deployed.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools":{"name":"rbd_stats_pools","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools_refresh_interval":{"name":"rbd_stats_pools_refresh_interval","type":"int","level":"advanced","flags":0,"default_value":"300","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"scrape_interval":{"name":"scrape_interval","type":"float","level":"advanced","flags":0,"default_value":"15.0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"the IPv4 or IPv6 address on which the module listens for HTTP requests","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":1,"default_value":"9283","min":"","max":"","enum_allowed":[],"desc":"the port on which the module listens for HTTP 
requests","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"stale_cache_strategy":{"name":"stale_cache_strategy","type":"str","level":"advanced","flags":0,"default_value":"log","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":1,"default_value":"default","min":"","max":"","enum_allowed":["default","error"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":1,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rbd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_snap_create":{"name":"max_concurrent_snap_create","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mirror_snapshot_schedule":{"name":"mirror_snapshot_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"trash_purge_schedule":{"name":"trash_purge_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"restful","can_run":true,"error_string":"","module_options":{"enable_auth":{"name":"enable_auth","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"st
r","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_requests":{"name":"max_requests","type":"int","level":"advanced","flags":0,"default_value":"500","min":"","max":"","enum_allowed":[],"desc":"Maximum number of requests to keep in memory. When new request comes in, the oldest request will be removed if the number of requests exceeds the max request number. if un-finished request is removed, error message will be logged in the ceph-mgr log.","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rgw","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"secondary_zone_period_retry_limit":{"name":"secondary_zone_period_retry_limit","type":"int","level":"advanced","flags":0,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"RGW module period update retry limit for secondary 
site","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rook","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"storage_class":{"name":"storage_class","type":"str","level":"advanced","flags":0,"default_value":"local","min":"","max":"","enum_allowed":[],"desc":"storage class name for LSO-discovered PVs","long_desc":"","tags":[],"see_also":[]}}},{"name":"selftest","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption1":{"name":"roption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption2":{"name":"roption2","type":"str","level":"advanced","flags":0,"default_value":"xyz","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption1":{"name":"rwoption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption2":{"name":"rwoption2","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption3":{"name":"rwoption3","type":"float","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption4":{"name":"rwoption4","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":""
,"long_desc":"","tags":[],"see_also":[]},"rwoption5":{"name":"rwoption5","type":"bool","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption6":{"name":"rwoption6","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption7":{"name":"rwoption7","type":"int","level":"advanced","flags":0,"default_value":"","min":"1","max":"42","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testkey":{"name":"testkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testlkey":{"name":"testlkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testnewline":{"name":"testnewline","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"snap_schedule","can_run":true,"error_string":"","module_options":{"allow_m_granularity":{"name":"allow_m_granularity","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow minute scheduled snapshots","long_desc":"","tags":[],"see_also":[]},"dump_on_update":{"name":"dump_on_update","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"dump database to debug log on 
update","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"stats","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"status","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telegraf","can_run":true,"error_string":"","module_options":{"address":{"name":"address","type":"str","level":"advanced","flags":0,"default_value":"unixgram:///tmp/telegraf.sock","min":"","max":"","enum_a
llowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"15","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telemetry","can_run":true,"error_string":"","module_options":{"channel_basic":{"name":"channel_basic","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share basic cluster information (size, version)","long_desc":"","tags":[],"see_also":[]},"channel_crash":{"name":"channel_crash","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share metadata about Ceph daemon crashes (version, stack straces, etc)","long_desc":"","tags":[],"see_also":[]},"channel_device":{"name":"channel_device","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share device health metrics (e.g., SMART data, minus potentially identifying info like serial numbers)","long_desc":"","tags":[],"see_also":[]},"channel_ident":{"name":"channel_ident","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share a user-provided description and/or contact email for the cluster","long_desc":"","tags":[],"see_also":[]},"channel_perf":{"name":"channel_perf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share various performance metrics of a 
cluster","long_desc":"","tags":[],"see_also":[]},"contact":{"name":"contact","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"description":{"name":"description","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"device_url":{"name":"device_url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/device","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"int","level":"advanced","flags":0,"default_value":"24","min":"8","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"last_opt_revision":{"name":"last_opt_revision","type":"int","level":"advanced","flags":0,"default_value":"1","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"leaderboard":{"name":"leaderboard","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"leaderboard_description":{"name":"leaderboard_description","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"organization":{"name":"organization","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"proxy":{"name":"proxy","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url":{"name":"url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/report","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"test_orchestrator","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"adv
anced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"volumes","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_clones":{"name":"max_concurrent_clones","type":"int","level":"advanced","flags":0,"default_value":"4","min":"","max":"","enum_allowed":[],"desc":"Number of asynchronous cloner threads","long_desc":"","tags":[],"see_also":[]},"periodic_async_work":{"name":"periodic_async_work","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Periodically check for async work","long_desc":"","tags":[],"see_also":[]},"snapshot_clone_delay":{"name":"snapshot_clone_delay","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"Delay clone begin operation by snapshot_clone_delay seconds","long_desc":"","tags":[],"see_also":[]},"snapshot_clone_no_wait":{"name":"snapshot_clone_no_wait","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Reject subvolume clone request when cloner threads are 
busy","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"zabbix","can_run":true,"error_string":"","module_options":{"discovery_interval":{"name":"discovery_interval","type":"uint","level":"advanced","flags":0,"default_value":"100","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"identifier":{"name":"identifier","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_host":{"name":"zabbix_host","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_port":{"name":"zabbix_port","type":"int","level":"advanced","flags":0,"default_value":"10051","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_sender":{"name":"zabbix_sender","type":"str","level":"advanced","flags":0,"default_value":"/usr/bin/zabbix_sender","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}}]}],"modules":["cephadm","dashboard","iostat","nfs","restful"],"available_modules":[{"name":"alerts","can_run":true,"error_string":"","module_options":{"interval":{"name":"interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"How frequently to reexamine health 
status","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"smtp_destination":{"name":"smtp_destination","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Email address to send alerts to","long_desc":"","tags":[],"see_also":[]},"smtp_from_name":{"name":"smtp_from_name","type":"str","level":"advanced","flags":1,"default_value":"Ceph","min":"","max":"","enum_allowed":[],"desc":"Email From: name","long_desc":"","tags":[],"see_also":[]},"smtp_host":{"name":"smtp_host","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_password":{"name":"smtp_password","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Password to authenticate with","long_desc":"","tags":[],"see_also":[]},"smtp_port":{"name":"smtp_port","type":"int","level":"advanced","flags":1,"default_value":"465","min":"","max":"","enum_allowed":[],"desc":"SMTP port","long_desc":"","tags":[],"see_also":[]},"smtp_sender":{"name":"smtp_sender","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP envelope sender","long_desc":"","tags":[],"see_also":[]},"smtp_ssl":{"name":"smtp_ssl","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Use SSL to connect to SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_user":{"name":"smtp_user","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"User to authenticate as","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"balancer","can_run":true,"error_string":"","module_options":{"active":{"name":"active","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"automatically balance PGs across cluster","long_desc":"","tags":[],"see_also":[]},"begin_time":{"name":"begin_time","type":"str","level":"advanced","flags":1,"default_value":"0000","min":"","max":"","enum_allowed":[],"desc":"beginning time of day to automatically balance","long_desc":"This is a time of day in the format HHMM.","tags":[],"see_also":[]},"begin_weekday":{"name":"begin_weekday","type":"uint","level":"advanced","flags":1,"default_value":"0","min":"0","max":"6","enum_allowed":[],"desc":"Restrict automatic balancing to this day of the week or later","long_desc":"0 = Sunday, 1 = 
Monday, etc.","tags":[],"see_also":[]},"crush_compat_max_iterations":{"name":"crush_compat_max_iterations","type":"uint","level":"advanced","flags":1,"default_value":"25","min":"1","max":"250","enum_allowed":[],"desc":"maximum number of iterations to attempt optimization","long_desc":"","tags":[],"see_also":[]},"crush_compat_metrics":{"name":"crush_compat_metrics","type":"str","level":"advanced","flags":1,"default_value":"pgs,objects,bytes","min":"","max":"","enum_allowed":[],"desc":"metrics with which to calculate OSD utilization","long_desc":"Value is a list of one or more of \"pgs\", \"objects\", or \"bytes\", and indicates which metrics to use to balance utilization.","tags":[],"see_also":[]},"crush_compat_step":{"name":"crush_compat_step","type":"float","level":"advanced","flags":1,"default_value":"0.5","min":"0.001","max":"0.999","enum_allowed":[],"desc":"aggressiveness of optimization","long_desc":".99 is very aggressive, .01 is less aggressive","tags":[],"see_also":[]},"end_time":{"name":"end_time","type":"str","level":"advanced","flags":1,"default_value":"2359","min":"","max":"","enum_allowed":[],"desc":"ending time of day to automatically balance","long_desc":"This is a time of day in the format HHMM.","tags":[],"see_also":[]},"end_weekday":{"name":"end_weekday","type":"uint","level":"advanced","flags":1,"default_value":"0","min":"0","max":"6","enum_allowed":[],"desc":"Restrict automatic balancing to days of the week earlier than this","long_desc":"0 = Sunday, 1 = Monday, etc.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_score":{"name":"min_score","type":"float","level":"advanced","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"minimum score, below which no optimization is attempted","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":1,"default_value":"upmap","min":"","max":"","enum_allowed":["crush-compat","none","read","upmap","upmap-read"],"desc":"Balancer mode","long_desc":"","tags":[],"see_also":[]},"pool_ids":{"name":"pool_ids","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"pools which the automatic balancing will be limited to","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and attempt 
optimization","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"update_pg_upmap_activity":{"name":"update_pg_upmap_activity","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Updates pg_upmap activity stats to be used in `balancer status detail`","long_desc":"","tags":[],"see_also":[]},"upmap_max_deviation":{"name":"upmap_max_deviation","type":"int","level":"advanced","flags":1,"default_value":"5","min":"1","max":"","enum_allowed":[],"desc":"deviation below which no optimization is attempted","long_desc":"If the number of PGs are within this count then no optimization is attempted","tags":[],"see_also":[]},"upmap_max_optimizations":{"name":"upmap_max_optimizations","type":"uint","level":"advanced","flags":1,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"maximum upmap optimizations to make per attempt","long_desc":"","tags":[],"see_also":[]}}},{"name":"cephadm","can_run":true,"error_string":"","module_options":{"agent_down_multiplier":{"name":"agent_down_multiplier","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"","max":"","enum_allowed":[],"desc":"Multiplied by agent refresh rate to calculate how long agent must not report before being marked down","long_desc":"","tags":[],"see_also":[]},"agent_refresh_rate":{"name":"agent_refresh_rate","type":"secs","level":"advanced","flags":0,"default_value":"20","min":"","max":"","enum_allowed":[],"desc":"How often agent on each host will try to gather and send metadata","long_desc":"","tags":[],"see_also":[]},"agent_starting_port":{"name":"agent_starting_port","type":"int","level":"advanced","flags":0,"default_value":"4721","min":"","max":"","enum_allowed":[],"desc":"First port agent will try to bind to (will also try up to next 1000 subsequent ports if blocked)","long_desc":"","tags":[],"see_also":[]},"allow_ptrace":{"name":"allow_ptrace","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow SYS_PTRACE capability on ceph containers","long_desc":"The SYS_PTRACE capability is needed to attach to a process with gdb or strace. 
Enabling this options can allow debugging daemons that encounter problems at runtime.","tags":[],"see_also":[]},"autotune_interval":{"name":"autotune_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to autotune daemon memory","long_desc":"","tags":[],"see_also":[]},"autotune_memory_target_ratio":{"name":"autotune_memory_target_ratio","type":"float","level":"advanced","flags":0,"default_value":"0.7","min":"","max":"","enum_allowed":[],"desc":"ratio of total system memory to divide amongst autotuned daemons","long_desc":"","tags":[],"see_also":[]},"cephadm_log_destination":{"name":"cephadm_log_destination","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":["file","file,syslog","syslog"],"desc":"Destination for cephadm command's persistent logging","long_desc":"","tags":[],"see_also":[]},"cgroups_split":{"name":"cgroups_split","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Pass --cgroups=split when cephadm creates containers (currently podman only)","long_desc":"","tags":[],"see_also":[]},"config_checks_enabled":{"name":"config_checks_enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable or disable the cephadm configuration analysis","long_desc":"","tags":[],"see_also":[]},"config_dashboard":{"name":"config_dashboard","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"manage configs like API endpoints in Dashboard.","long_desc":"","tags":[],"see_also":[]},"container_image_alertmanager":{"name":"container_image_alertmanager","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/alertmanager:v0.25.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_base":{"name":"container_image_base","type":"str","level":"advanced","flags":1,"default_value":"quay.io/ceph/ceph","min":"","max":"","enum_allowed":[],"desc":"Container image name, without the tag","long_desc":"","tags":[],"see_also":[]},"container_image_elasticsearch":{"name":"container_image_elasticsearch","type":"str","level":"advanced","flags":0,"default_value":"quay.io/omrizeneva/elasticsearch:6.8.23","min":"","max":"","enum_allowed":[],"desc":"elasticsearch container image","long_desc":"","tags":[],"see_also":[]},"container_image_grafana":{"name":"container_image_grafana","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/grafana:10.4.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_haproxy":{"name":"container_image_haproxy","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/haproxy:2.3","min":"","max":"","enum_allowed":[],"desc":"HAproxy container image","long_desc":"","tags":[],"see_also":[]},"container_image_jaeger_agent":{"name":"container_image_jaeger_agent","type":"str","level":"advanced","flags":0,"default_value":"quay.io/jaegertracing/jaeger-agent:1.29","min":"","max":"","enum_allowed":[],"desc":"Jaeger agent container image","long_desc":"","tags":[],"see_also":[]},"container_image_jaeger_collector":{"name":"container_image_jaeger_collector","type":"str","level":"advanced","flags":0,"default_value":"quay.io/jaegertracing/jaeger-collector:1.29","min":"","max":"","enum_allowed":[],"desc":"Jaeger 
collector container image","long_desc":"","tags":[],"see_also":[]},"container_image_jaeger_query":{"name":"container_image_jaeger_query","type":"str","level":"advanced","flags":0,"default_value":"quay.io/jaegertracing/jaeger-query:1.29","min":"","max":"","enum_allowed":[],"desc":"Jaeger query container image","long_desc":"","tags":[],"see_also":[]},"container_image_keepalived":{"name":"container_image_keepalived","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/keepalived:2.2.4","min":"","max":"","enum_allowed":[],"desc":"Keepalived container image","long_desc":"","tags":[],"see_also":[]},"container_image_loki":{"name":"container_image_loki","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/loki:3.0.0","min":"","max":"","enum_allowed":[],"desc":"Loki container image","long_desc":"","tags":[],"see_also":[]},"container_image_node_exporter":{"name":"container_image_node_exporter","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/node-exporter:v1.7.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_nvmeof":{"name":"container_image_nvmeof","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/nvmeof:1.2.5","min":"","max":"","enum_allowed":[],"desc":"Nvme-of container image","long_desc":"","tags":[],"see_also":[]},"container_image_prometheus":{"name":"container_image_prometheus","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/prometheus:v2.51.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_promtail":{"name":"container_image_promtail","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/promtail:3.0.0","min":"","max":"","enum_allowed":[],"desc":"Promtail container image","long_desc":"","tags":[],"see_also":[]},"container_image_samba":{"name":"container_image_samba","type":"str","level":"advanced","flags":0,"default_value":"quay.io/samba.org/samba-server:devbuilds-centos-amd64","min":"","max":"","enum_allowed":[],"desc":"Samba/SMB container image","long_desc":"","tags":[],"see_also":[]},"container_image_snmp_gateway":{"name":"container_image_snmp_gateway","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/snmp-notifier:v1.2.1","min":"","max":"","enum_allowed":[],"desc":"SNMP Gateway container image","long_desc":"","tags":[],"see_also":[]},"container_init":{"name":"container_init","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Run podman/docker with `--init`","long_desc":"","tags":[],"see_also":[]},"daemon_cache_timeout":{"name":"daemon_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"seconds to cache service (daemon) inventory","long_desc":"","tags":[],"see_also":[]},"default_cephadm_command_timeout":{"name":"default_cephadm_command_timeout","type":"int","level":"advanced","flags":0,"default_value":"900","min":"","max":"","enum_allowed":[],"desc":"Default timeout applied to cephadm commands run directly on the host (in seconds)","long_desc":"","tags":[],"see_also":[]},"default_registry":{"name":"default_registry","type":"str","level":"advanced","flags":0,"default_value":"quay.io","min":"","max":"","enum_allowed":[],"desc":"Search-registry to which we should normalize unqualified image names. 
This is not the default registry","long_desc":"","tags":[],"see_also":[]},"device_cache_timeout":{"name":"device_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"1800","min":"","max":"","enum_allowed":[],"desc":"seconds to cache device inventory","long_desc":"","tags":[],"see_also":[]},"device_enhanced_scan":{"name":"device_enhanced_scan","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use libstoragemgmt during device scans","long_desc":"","tags":[],"see_also":[]},"facts_cache_timeout":{"name":"facts_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"seconds to cache host facts data","long_desc":"","tags":[],"see_also":[]},"grafana_dashboards_path":{"name":"grafana_dashboards_path","type":"str","level":"advanced","flags":0,"default_value":"/etc/grafana/dashboards/ceph-dashboard/","min":"","max":"","enum_allowed":[],"desc":"location of dashboards to include in grafana deployments","long_desc":"","tags":[],"see_also":[]},"host_check_interval":{"name":"host_check_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to perform a host check","long_desc":"","tags":[],"see_also":[]},"hw_monitoring":{"name":"hw_monitoring","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Deploy hw monitoring daemon on every host.","long_desc":"","tags":[],"see_also":[]},"inventory_list_all":{"name":"inventory_list_all","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Whether ceph-volume inventory should report more devices (mostly mappers (LVs / mpaths), partitions...)","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_refresh_metadata":{"name":"log_refresh_metadata","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Log all refresh metadata. Includes daemon, device, and host info collected regularly. 
Only has effect if logging at debug level","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"log to the \"cephadm\" cluster log channel\"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf":{"name":"manage_etc_ceph_ceph_conf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Manage and own /etc/ceph/ceph.conf on the hosts.","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf_hosts":{"name":"manage_etc_ceph_ceph_conf_hosts","type":"str","level":"advanced","flags":0,"default_value":"*","min":"","max":"","enum_allowed":[],"desc":"PlacementSpec describing on which hosts to manage /etc/ceph/ceph.conf","long_desc":"","tags":[],"see_also":[]},"max_count_per_host":{"name":"max_count_per_host","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of daemons per service per host","long_desc":"","tags":[],"see_also":[]},"max_osd_draining_count":{"name":"max_osd_draining_count","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of osds that will be drained simultaneously when osds are removed","long_desc":"","tags":[],"see_also":[]},"migration_current":{"name":"migration_current","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"internal - do not modify","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":0,"default_value":"root","min":"","max":"","enum_allowed":["cephadm-package","root"],"desc":"mode for remote execution of cephadm","long_desc":"","tags":[],"see_also":[]},"oob_default_addr":{"name":"oob_default_addr","type":"str","level":"advanced","flags":0,"default_value":"169.254.1.1","min":"","max":"","enum_allowed":[],"desc":"Default address for RedFish API (oob management).","long_desc":"","tags":[],"see_also":[]},"prometheus_alerts_path":{"name":"prometheus_alerts_path","type":"str","level":"advanced","flags":0,"default_value":"/etc/prometheus/ceph/ceph_default_alerts.yml","min":"","max":"","enum_allowed":[],"desc":"location of alerts to include in prometheus deployments","long_desc":"","tags":[],"see_also":[]},"registry_insecure":{"name":"registry_insecure","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Registry is to be considered insecure (no TLS available). Only for development purposes.","long_desc":"","tags":[],"see_also":[]},"registry_password":{"name":"registry_password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository password. 
Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"registry_url":{"name":"registry_url","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Registry url for login purposes. This is not the default registry","long_desc":"","tags":[],"see_also":[]},"registry_username":{"name":"registry_username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository username. Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"secure_monitoring_stack":{"name":"secure_monitoring_stack","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable TLS security for all the monitoring stack daemons","long_desc":"","tags":[],"see_also":[]},"service_discovery_port":{"name":"service_discovery_port","type":"int","level":"advanced","flags":0,"default_value":"8765","min":"","max":"","enum_allowed":[],"desc":"cephadm service discovery port","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssh_config_file":{"name":"ssh_config_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"customized SSH config file to connect to managed hosts","long_desc":"","tags":[],"see_also":[]},"ssh_keepalive_count_max":{"name":"ssh_keepalive_count_max","type":"int","level":"advanced","flags":0,"default_value":"3","min":"","max":"","enum_allowed":[],"desc":"How many times ssh connections can fail liveness checks before the host is marked offline","long_desc":"","tags":[],"see_also":[]},"ssh_keepalive_interval":{"name":"ssh_keepalive_interval","type":"int","level":"advanced","flags":0,"default_value":"7","min":"","max":"","enum_allowed":[],"desc":"How often ssh connections are checked for liveness","long_desc":"","tags":[],"see_also":[]},"use_agent":{"name":"use_agent","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use cephadm agent on each host to gather and send metadata","long_desc":"","tags":[],"see_also":[]},"use_repo_digest":{"name":"use_repo_digest","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Automatically convert image tags to image digest. 
Make sure all daemons use the same image","long_desc":"","tags":[],"see_also":[]},"warn_on_failed_host_check":{"name":"warn_on_failed_host_check","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if the host check fails","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_daemons":{"name":"warn_on_stray_daemons","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected that are not managed by cephadm","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_hosts":{"name":"warn_on_stray_hosts","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected on a host that is not managed by cephadm","long_desc":"","tags":[],"see_also":[]}}},{"name":"crash","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"retain_interval":{"name":"retain_interval","type":"secs","level":"advanced","flags":1,"default_value":"31536000","min":"","max":"","enum_allowed":[],"desc":"how long to retain crashes before pruning them","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"warn_recent_interval":{"name":"warn_recent_interval","type":"secs","level":"advanced","flags":1,"default_value":"1209600","min":"","max":"","enum_allowed":[],"desc":"time interval in which to warn about recent 
crashes","long_desc":"","tags":[],"see_also":[]}}},{"name":"dashboard","can_run":true,"error_string":"","module_options":{"ACCOUNT_LOCKOUT_ATTEMPTS":{"name":"ACCOUNT_LOCKOUT_ATTEMPTS","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_HOST":{"name":"ALERTMANAGER_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_SSL_VERIFY":{"name":"ALERTMANAGER_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_ENABLED":{"name":"AUDIT_API_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_LOG_PAYLOAD":{"name":"AUDIT_API_LOG_PAYLOAD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ENABLE_BROWSABLE_API":{"name":"ENABLE_BROWSABLE_API","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_CEPHFS":{"name":"FEATURE_TOGGLE_CEPHFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_DASHBOARD":{"name":"FEATURE_TOGGLE_DASHBOARD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_ISCSI":{"name":"FEATURE_TOGGLE_ISCSI","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_MIRRORING":{"name":"FEATURE_TOGGLE_MIRRORING","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_NFS":{"name":"FEATURE_TOGGLE_NFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RBD":{"name":"FEATURE_TOGGLE_RBD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RGW":{"name":"FEATURE_TOGGLE_RGW","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE":{"name":"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_PASSWORD":{"name":"GRAFANA_API_PASSWORD","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_SSL_VERIFY":{"name":"GRAFANA_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_URL":{"name":"GRAFANA_API_URL","type":"str","level":"advanced","flags":0
,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_USERNAME":{"name":"GRAFANA_API_USERNAME","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_FRONTEND_API_URL":{"name":"GRAFANA_FRONTEND_API_URL","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_UPDATE_DASHBOARDS":{"name":"GRAFANA_UPDATE_DASHBOARDS","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISCSI_API_SSL_VERIFICATION":{"name":"ISCSI_API_SSL_VERIFICATION","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISSUE_TRACKER_API_KEY":{"name":"ISSUE_TRACKER_API_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_HOST":{"name":"PROMETHEUS_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_SSL_VERIFY":{"name":"PROMETHEUS_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_COMPLEXITY_ENABLED":{"name":"PWD_POLICY_CHECK_COMPLEXITY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED":{"name":"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_LENGTH_ENABLED":{"name":"PWD_POLICY_CHECK_LENGTH_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_OLDPWD_ENABLED":{"name":"PWD_POLICY_CHECK_OLDPWD_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_USERNAME_ENABLED":{"name":"PWD_POLICY_CHECK_USERNAME_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_ENABLED":{"name":"PWD_POLICY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_EXCLUSION_LIST":{"name":"PWD_POLICY_EXCLUSION_LIST","type":"str","level":"advanced","flags":0,"def
ault_value":"osd,host,dashboard,pool,block,nfs,ceph,monitors,gateway,logs,crush,maps","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_COMPLEXITY":{"name":"PWD_POLICY_MIN_COMPLEXITY","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_LENGTH":{"name":"PWD_POLICY_MIN_LENGTH","type":"int","level":"advanced","flags":0,"default_value":"8","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"REST_REQUESTS_TIMEOUT":{"name":"REST_REQUESTS_TIMEOUT","type":"int","level":"advanced","flags":0,"default_value":"45","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ACCESS_KEY":{"name":"RGW_API_ACCESS_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ADMIN_RESOURCE":{"name":"RGW_API_ADMIN_RESOURCE","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SECRET_KEY":{"name":"RGW_API_SECRET_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SSL_VERIFY":{"name":"RGW_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_SPAN":{"name":"USER_PWD_EXPIRATION_SPAN","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_1":{"name":"USER_PWD_EXPIRATION_WARNING_1","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_2":{"name":"USER_PWD_EXPIRATION_WARNING_2","type":"int","level":"advanced","flags":0,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"cross_origin_url":{"name":"cross_origin_url","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"crt_file":{"name":"crt_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"debug":{"name":"debug","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable/disable debug 
options","long_desc":"","tags":[],"see_also":[]},"jwt_token_ttl":{"name":"jwt_token_ttl","type":"int","level":"advanced","flags":0,"default_value":"28800","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"motd":{"name":"motd","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"The message of the day","long_desc":"","tags":[],"see_also":[]},"redirect_resolve_ip_addr":{"name":"redirect_resolve_ip_addr","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":0,"default_value":"8080","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl_server_port":{"name":"ssl_server_port","type":"int","level":"advanced","flags":0,"default_value":"8443","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":0,"default_value":"redirect","min":"","max":"","enum_allowed":["error","redirect"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":0,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url_prefix":{"name":"url_prefix","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"devicehealth","can_run":true,"error_string":"","module_options":{"enable_monitoring":{"name":"enable_monitoring","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"monitor device health 
metrics","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mark_out_threshold":{"name":"mark_out_threshold","type":"secs","level":"advanced","flags":1,"default_value":"2419200","min":"","max":"","enum_allowed":[],"desc":"automatically mark OSD if it may fail before this long","long_desc":"","tags":[],"see_also":[]},"pool_name":{"name":"pool_name","type":"str","level":"advanced","flags":1,"default_value":"device_health_metrics","min":"","max":"","enum_allowed":[],"desc":"name of pool in which to store device health metrics","long_desc":"","tags":[],"see_also":[]},"retention_period":{"name":"retention_period","type":"secs","level":"advanced","flags":1,"default_value":"15552000","min":"","max":"","enum_allowed":[],"desc":"how long to retain device health metrics","long_desc":"","tags":[],"see_also":[]},"scrape_frequency":{"name":"scrape_frequency","type":"secs","level":"advanced","flags":1,"default_value":"86400","min":"","max":"","enum_allowed":[],"desc":"how frequently to scrape device health metrics","long_desc":"","tags":[],"see_also":[]},"self_heal":{"name":"self_heal","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"preemptively heal cluster around devices that may fail","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and check device health","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"warn_threshold":{"name":"warn_threshold","type":"secs","level":"advanced","flags":1,"default_value":"7257600","min":"","max":"","enum_allowed":[],"desc":"raise health warning if OSD may fail before this long","long_desc":"","tags":[],"see_also":[]}}},{"name":"influx","can_run":false,"error_string":"influxdb python module not found","module_options":{"batch_size":{"name":"batch_size","type":"int","level":"advanced","flags":0,"default_value":"5000","min":"","max":"","enum_allowed":[],"desc":"How big batches of data points should be when sending to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"database":{"name":"database","type":"str","level":"advanced","flags":0,"default_value":"ceph","min":"","max":"","enum_allowed":[],"desc":"InfluxDB database name. 
You will need to create this database and grant write privileges to the configured username or the username must have admin privileges to create it.","long_desc":"","tags":[],"see_also":[]},"hostname":{"name":"hostname","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server hostname","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"30","min":"5","max":"","enum_allowed":[],"desc":"Time between reports to InfluxDB. Default 30 seconds.","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"password":{"name":"password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"password of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"port":{"name":"port","type":"int","level":"advanced","flags":0,"default_value":"8086","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server port","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"str","level":"advanced","flags":0,"default_value":"false","min":"","max":"","enum_allowed":[],"desc":"Use https connection for InfluxDB server. Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]},"threads":{"name":"threads","type":"int","level":"advanced","flags":0,"default_value":"5","min":"1","max":"32","enum_allowed":[],"desc":"How many worker threads should be spawned for sending data to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"username":{"name":"username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"username of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"verify_ssl":{"name":"verify_ssl","type":"str","level":"advanced","flags":0,"default_value":"true","min":"","max":"","enum_allowed":[],"desc":"Verify https cert for InfluxDB server. 
Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]}}},{"name":"insights","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"iostat","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"k8sevents","can_run":true,"error_string":"","module_options":{"ceph_event_retention_days":{"name":"ceph_event_retention_days","type":"int","level":"advanced","flags":0,"default_value":"7","min":"","max":"","enum_allowed":[],"desc":"Days to hold ceph event information within local cache","long_desc":"","tags":[],"see_also":[]},"config_check_secs":{"name":"config_check_secs","type":"int","level":"advanced","flags":0,"default_value":"10","min":"10","max":"","enum_allowed":[],"desc":"interval (secs) to check for cluster configuration 
changes","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"localpool","can_run":true,"error_string":"","module_options":{"failure_domain":{"name":"failure_domain","type":"str","level":"advanced","flags":1,"default_value":"host","min":"","max":"","enum_allowed":[],"desc":"failure domain for any created local pool","long_desc":"what failure domain we should separate data replicas across.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_size":{"name":"min_size","type":"int","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"default min_size for any created local pool","long_desc":"value to set min_size to (unchanged from Ceph's default if this option is not set)","tags":[],"see_also":[]},"num_rep":{"name":"num_rep","type":"int","level":"advanced","flags":1,"default_value":"3","min":"","max":"","enum_allowed":[],"desc":"default replica count for any created local pool","long_desc":"","tags":[],"see_also":[]},"pg_num":{"name":"pg_num","type":"int","level":"advanced","flags":1,"default_value":"128","min":"","max":"","enum_allowed":[],"desc":"default pg_num for any created local pool","long_desc":"","tags":[],"see_also":[]},"prefix":{"name":"prefix","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"name prefix for any created local 
pool","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"subtree":{"name":"subtree","type":"str","level":"advanced","flags":1,"default_value":"rack","min":"","max":"","enum_allowed":[],"desc":"CRUSH level for which to create a local pool","long_desc":"which CRUSH subtree type the module should create a pool for.","tags":[],"see_also":[]}}},{"name":"mds_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"mirroring","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"nfs","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":
"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"orchestrator","can_run":true,"error_string":"","module_options":{"fail_fs":{"name":"fail_fs","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Fail filesystem for rapid multi-rank mds upgrade","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"orchestrator":{"name":"orchestrator","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["cephadm","rook","test_orchestrator"],"desc":"Orchestrator backend","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_perf_query","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[
]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"pg_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"threshold":{"name":"threshold","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"1.0","max":"","enum_allowed":[],"desc":"scaling threshold","long_desc":"The factor by which the `NEW PG_NUM` must vary from the current`PG_NUM` before being accepted. 
Cannot be less than 1.0","tags":[],"see_also":[]}}},{"name":"progress","can_run":true,"error_string":"","module_options":{"allow_pg_recovery_event":{"name":"allow_pg_recovery_event","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow the module to show pg recovery progress","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_completed_events":{"name":"max_completed_events","type":"int","level":"advanced","flags":1,"default_value":"50","min":"","max":"","enum_allowed":[],"desc":"number of past completed events to remember","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"how long the module is going to sleep","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"prometheus","can_run":true,"error_string":"","module_options":{"cache":{"name":"cache","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"exclude_perf_counters":{"name":"exclude_perf_counters","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Do not include perf-counters in the metrics output","long_desc":"Gathering perf-counters from a single Prometheus exporter can degrade ceph-mgr performance, especially in large clusters. Instead, Ceph-exporter daemons are now used by default for perf-counter gathering. 
This should only be disabled when no ceph-exporters are deployed.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools":{"name":"rbd_stats_pools","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools_refresh_interval":{"name":"rbd_stats_pools_refresh_interval","type":"int","level":"advanced","flags":0,"default_value":"300","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"scrape_interval":{"name":"scrape_interval","type":"float","level":"advanced","flags":0,"default_value":"15.0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"the IPv4 or IPv6 address on which the module listens for HTTP requests","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":1,"default_value":"9283","min":"","max":"","enum_allowed":[],"desc":"the port on which the module listens for HTTP 
requests","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"stale_cache_strategy":{"name":"stale_cache_strategy","type":"str","level":"advanced","flags":0,"default_value":"log","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":1,"default_value":"default","min":"","max":"","enum_allowed":["default","error"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":1,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rbd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_snap_create":{"name":"max_concurrent_snap_create","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mirror_snapshot_schedule":{"name":"mirror_snapshot_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"trash_purge_schedule":{"name":"trash_purge_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"restful","can_run":true,"error_string":"","module_options":{"enable_auth":{"name":"enable_auth","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"st
r","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_requests":{"name":"max_requests","type":"int","level":"advanced","flags":0,"default_value":"500","min":"","max":"","enum_allowed":[],"desc":"Maximum number of requests to keep in memory. When new request comes in, the oldest request will be removed if the number of requests exceeds the max request number. if un-finished request is removed, error message will be logged in the ceph-mgr log.","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rgw","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"secondary_zone_period_retry_limit":{"name":"secondary_zone_period_retry_limit","type":"int","level":"advanced","flags":0,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"RGW module period update retry limit for secondary 
site","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rook","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"storage_class":{"name":"storage_class","type":"str","level":"advanced","flags":0,"default_value":"local","min":"","max":"","enum_allowed":[],"desc":"storage class name for LSO-discovered PVs","long_desc":"","tags":[],"see_also":[]}}},{"name":"selftest","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption1":{"name":"roption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption2":{"name":"roption2","type":"str","level":"advanced","flags":0,"default_value":"xyz","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption1":{"name":"rwoption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption2":{"name":"rwoption2","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption3":{"name":"rwoption3","type":"float","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption4":{"name":"rwoption4","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":""
,"long_desc":"","tags":[],"see_also":[]},"rwoption5":{"name":"rwoption5","type":"bool","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption6":{"name":"rwoption6","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption7":{"name":"rwoption7","type":"int","level":"advanced","flags":0,"default_value":"","min":"1","max":"42","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testkey":{"name":"testkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testlkey":{"name":"testlkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testnewline":{"name":"testnewline","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"snap_schedule","can_run":true,"error_string":"","module_options":{"allow_m_granularity":{"name":"allow_m_granularity","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow minute scheduled snapshots","long_desc":"","tags":[],"see_also":[]},"dump_on_update":{"name":"dump_on_update","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"dump database to debug log on 
update","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"stats","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"status","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telegraf","can_run":true,"error_string":"","module_options":{"address":{"name":"address","type":"str","level":"advanced","flags":0,"default_value":"unixgram:///tmp/telegraf.sock","min":"","max":"","enum_a
llowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"15","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telemetry","can_run":true,"error_string":"","module_options":{"channel_basic":{"name":"channel_basic","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share basic cluster information (size, version)","long_desc":"","tags":[],"see_also":[]},"channel_crash":{"name":"channel_crash","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share metadata about Ceph daemon crashes (version, stack straces, etc)","long_desc":"","tags":[],"see_also":[]},"channel_device":{"name":"channel_device","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share device health metrics (e.g., SMART data, minus potentially identifying info like serial numbers)","long_desc":"","tags":[],"see_also":[]},"channel_ident":{"name":"channel_ident","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share a user-provided description and/or contact email for the cluster","long_desc":"","tags":[],"see_also":[]},"channel_perf":{"name":"channel_perf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share various performance metrics of a 
cluster","long_desc":"","tags":[],"see_also":[]},"contact":{"name":"contact","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"description":{"name":"description","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"device_url":{"name":"device_url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/device","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"int","level":"advanced","flags":0,"default_value":"24","min":"8","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"last_opt_revision":{"name":"last_opt_revision","type":"int","level":"advanced","flags":0,"default_value":"1","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"leaderboard":{"name":"leaderboard","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"leaderboard_description":{"name":"leaderboard_description","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"organization":{"name":"organization","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"proxy":{"name":"proxy","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url":{"name":"url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/report","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"test_orchestrator","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"adv
anced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"volumes","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_clones":{"name":"max_concurrent_clones","type":"int","level":"advanced","flags":0,"default_value":"4","min":"","max":"","enum_allowed":[],"desc":"Number of asynchronous cloner threads","long_desc":"","tags":[],"see_also":[]},"periodic_async_work":{"name":"periodic_async_work","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Periodically check for async work","long_desc":"","tags":[],"see_also":[]},"snapshot_clone_delay":{"name":"snapshot_clone_delay","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"Delay clone begin operation by snapshot_clone_delay seconds","long_desc":"","tags":[],"see_also":[]},"snapshot_clone_no_wait":{"name":"snapshot_clone_no_wait","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Reject subvolume clone request when cloner threads are 
busy","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"zabbix","can_run":true,"error_string":"","module_options":{"discovery_interval":{"name":"discovery_interval","type":"uint","level":"advanced","flags":0,"default_value":"100","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"identifier":{"name":"identifier","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_host":{"name":"zabbix_host","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_port":{"name":"zabbix_port","type":"int","level":"advanced","flags":0,"default_value":"10051","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_sender":{"name":"zabbix_sender","type":"str","level":"advanced","flags":0,"default_value":"/usr/bin/zabbix_sender","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}}],"services":{"dashboard":"https://192.168.123.101:8443/"},"always_on_modules":{"octopus":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"pacific":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"quincy":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"reef":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"squid":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"]},"force_disabled_modules":{},"last_failure_osd_epoch":3,"active_clients":[{"name":"devicehealth","addrvec":[{"type":"v2","addr":"192.168.123.101:0","nonce":427343353}]},{"name":"libcephsqlite","addrvec":[{"type":"v2","addr":"192.168.123.101:0","nonce":2167011662}]},{"name":"rbd_support","addrvec":[{"type":"v2","ad
dr":"192.168.123.101:0","nonce":2182371350}]},{"name":"volumes","addrvec":[{"type":"v2","addr":"192.168.123.101:0","nonce":561677720}]}]} 2026-03-07T10:16:22.880 INFO:tasks.cephadm.ceph_manager.ceph:mgr available! 2026-03-07T10:16:22.880 INFO:tasks.cephadm.ceph_manager.ceph:waiting for all up 2026-03-07T10:16:22.880 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid 3fd6e214-1a0e-11f1-b256-99cfc35f3328 -- ceph osd dump --format=json 2026-03-07T10:16:23.058 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/mon.a/config 2026-03-07T10:16:23.429 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:23 vm01 ceph-mon[49602]: from='client.? 192.168.123.101:0/651246273' entity='client.admin' cmd=[{"prefix": "mgr dump", "format": "json"}]: dispatch 2026-03-07T10:16:23.429 INFO:teuthology.orchestra.run.vm01.stdout: 2026-03-07T10:16:23.429 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":35,"fsid":"3fd6e214-1a0e-11f1-b256-99cfc35f3328","created":"2026-03-07T10:13:49.200752+0000","modified":"2026-03-07T10:16:21.236435+0000","last_up_change":"2026-03-07T10:16:19.224891+0000","last_in_change":"2026-03-07T10:16:07.902065+0000","flags":"sortbitwise,recovery_deletes,purged_snapdirs,pglog_hardlimit","flags_num":5799936,"flags_set":["pglog_hardlimit","purged_snapdirs","recovery_deletes","sortbitwise"],"crush_version":14,"full_ratio":0.94999998807907104,"backfillfull_ratio":0.89999997615814209,"nearfull_ratio":0.85000002384185791,"cluster_snapshot":"","pool_max":1,"max_osd":6,"require_min_compat_client":"luminous","min_compat_client":"jewel","require_osd_release":"squid","allow_crimson":false,"pools":[{"pool":1,"pool_name":".mgr","create_time":"2026-03-07T10:15:41.708136+0000","flags":1,"flags_names":"hashpspool","type":1,"size":3,"min_size":2,"crush_rule":0,"peering_crush_bucket_count":0,"peering_crush_bucket_target":0,"peering_crush_bucket_barrier":0,"peering_crush_bucket_mandatory_member":2147483647,"is_stretch_pool":false,"object_hash":2,"pg_autoscale_mode":"off","pg_num":1,"pg_placement_num":1,"pg_placement_num_target":1,"pg_num_target":1,"pg_num_pending":1,"last_pg_merge_meta":{"source_pgid":"0.0","ready_epoch":0,"last_epoch_started":0,"last_epoch_clean":0,"source_version":"0'0","target_version":"0'0"},"last_change":"19","last_force_op_resend":"0","last_force_op_resend_prenautilus":"0","last_force_op_resend_preluminous":"0","auid":0,"snap_mode":"selfmanaged","snap_seq":0,"snap_epoch":0,"pool_snaps":[],"removed_snaps":"[]","quota_max_bytes":0,"quota_max_objects":0,"tiers":[],"tier_of":-1,"read_tier":-1,"write_tier":-1,"cache_mode":"none","target_max_bytes":0,"target_max_objects":0,"cache_target_dirty_ratio_micro":400000,"cache_target_dirty_high_ratio_micro":600000,"cache_target_full_ratio_micro":800000,"cache_min_flush_age":0,"cache_min_evict_age":0,"erasure_code_profile":"","hit_set_params":{"type":"none"},"hit_set_period":0,"hit_set_count":0,"use_gmt_hitset":true,"min_read_recency_for_promote":0,"min_write_recency_for_promote":0,"hit_set_grade_decay_rate":0,"hit_set_search_last_n":0,"grade_table":[],"stripe_width":0,"expected_num_objects":0,"fast_read":false,"options":{"pg_num_max":32,"pg_num_min":1},"application_metadata":{"mgr":{}},"read_balance":{"score_type":"Fair 
distribution","score_acting":6,"score_stable":6,"optimal_score":0.5,"raw_score_acting":3,"raw_score_stable":3,"primary_affinity_weighted":1,"average_primary_affinity":1,"average_primary_affinity_weighted":1}}],"osds":[{"osd":0,"uuid":"2672a018-9301-4f8e-b634-bac5c79f0203","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":8,"up_thru":22,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6802","nonce":4213759478},{"type":"v1","addr":"192.168.123.101:6803","nonce":4213759478}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6804","nonce":4213759478},{"type":"v1","addr":"192.168.123.101:6805","nonce":4213759478}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6808","nonce":4213759478},{"type":"v1","addr":"192.168.123.101:6809","nonce":4213759478}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6806","nonce":4213759478},{"type":"v1","addr":"192.168.123.101:6807","nonce":4213759478}]},"public_addr":"192.168.123.101:6803/4213759478","cluster_addr":"192.168.123.101:6805/4213759478","heartbeat_back_addr":"192.168.123.101:6809/4213759478","heartbeat_front_addr":"192.168.123.101:6807/4213759478","state":["exists","up"]},{"osd":1,"uuid":"930d5f0c-1a5a-4d6c-b82c-8410fdb0227e","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":12,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6810","nonce":2110476213},{"type":"v1","addr":"192.168.123.101:6811","nonce":2110476213}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6812","nonce":2110476213},{"type":"v1","addr":"192.168.123.101:6813","nonce":2110476213}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6816","nonce":2110476213},{"type":"v1","addr":"192.168.123.101:6817","nonce":2110476213}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6814","nonce":2110476213},{"type":"v1","addr":"192.168.123.101:6815","nonce":2110476213}]},"public_addr":"192.168.123.101:6811/2110476213","cluster_addr":"192.168.123.101:6813/2110476213","heartbeat_back_addr":"192.168.123.101:6817/2110476213","heartbeat_front_addr":"192.168.123.101:6815/2110476213","state":["exists","up"]},{"osd":2,"uuid":"9403c18e-0c42-46a6-a584-58f450d0eb95","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":16,"up_thru":17,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6800","nonce":3187441749},{"type":"v1","addr":"192.168.123.104:6801","nonce":3187441749}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6802","nonce":3187441749},{"type":"v1","addr":"192.168.123.104:6803","nonce":3187441749}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6806","nonce":3187441749},{"type":"v1","addr":"192.168.123.104:6807","nonce":3187441749}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6804","nonce":3187441749},{"type":"v1","addr":"192.168.123.104:6805","nonce":3187441749}]},"public_addr":"192.168.123.104:6801/3187441749","cluster_addr":"192.168.123.104:6803/3187441749","heartbeat_back_addr":"192.168.123.104:6807/3187441749","heartbeat_front_addr":"192.168.123.104:6805/3187441749","state":["exists","up"]},{"osd":3,"uuid":"155a69d2-3a90-4797-870f-0d55995439d5","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_fr
om":22,"up_thru":34,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6808","nonce":1533780217},{"type":"v1","addr":"192.168.123.104:6809","nonce":1533780217}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6810","nonce":1533780217},{"type":"v1","addr":"192.168.123.104:6811","nonce":1533780217}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6814","nonce":1533780217},{"type":"v1","addr":"192.168.123.104:6815","nonce":1533780217}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6812","nonce":1533780217},{"type":"v1","addr":"192.168.123.104:6813","nonce":1533780217}]},"public_addr":"192.168.123.104:6809/1533780217","cluster_addr":"192.168.123.104:6811/1533780217","heartbeat_back_addr":"192.168.123.104:6815/1533780217","heartbeat_front_addr":"192.168.123.104:6813/1533780217","state":["exists","up"]},{"osd":4,"uuid":"654075eb-fb5d-47cc-abb4-791f434384d1","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":28,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6800","nonce":3003059232},{"type":"v1","addr":"192.168.123.107:6801","nonce":3003059232}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6802","nonce":3003059232},{"type":"v1","addr":"192.168.123.107:6803","nonce":3003059232}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6806","nonce":3003059232},{"type":"v1","addr":"192.168.123.107:6807","nonce":3003059232}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6804","nonce":3003059232},{"type":"v1","addr":"192.168.123.107:6805","nonce":3003059232}]},"public_addr":"192.168.123.107:6801/3003059232","cluster_addr":"192.168.123.107:6803/3003059232","heartbeat_back_addr":"192.168.123.107:6807/3003059232","heartbeat_front_addr":"192.168.123.107:6805/3003059232","state":["exists","up"]},{"osd":5,"uuid":"9dec1d82-be8f-40f2-9e9a-cbb3b644be63","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":33,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6808","nonce":1009376405},{"type":"v1","addr":"192.168.123.107:6809","nonce":1009376405}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6810","nonce":1009376405},{"type":"v1","addr":"192.168.123.107:6811","nonce":1009376405}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6814","nonce":1009376405},{"type":"v1","addr":"192.168.123.107:6815","nonce":1009376405}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6812","nonce":1009376405},{"type":"v1","addr":"192.168.123.107:6813","nonce":1009376405}]},"public_addr":"192.168.123.107:6809/1009376405","cluster_addr":"192.168.123.107:6811/1009376405","heartbeat_back_addr":"192.168.123.107:6815/1009376405","heartbeat_front_addr":"192.168.123.107:6813/1009376405","state":["exists","up"]}],"osd_xinfo":[{"osd":0,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-07T10:15:17.245690+0000","dead_epoch":0},{"osd":1,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-07T10:15:29.032199+0000","dead_epoch":0},{"osd":2,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"
old_weight":0,"last_purged_snaps_scrub":"2026-03-07T10:15:40.274573+0000","dead_epoch":0},{"osd":3,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-07T10:15:53.414722+0000","dead_epoch":0},{"osd":4,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-07T10:16:06.017105+0000","dead_epoch":0},{"osd":5,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-07T10:16:17.380660+0000","dead_epoch":0}],"pg_upmap":[],"pg_upmap_items":[],"pg_upmap_primaries":[],"pg_temp":[],"primary_temp":[],"blocklist":{"192.168.123.101:0/2057860323":"2026-03-08T10:14:31.637587+0000","192.168.123.101:6801/121993066":"2026-03-08T10:14:31.637587+0000","192.168.123.101:6800/121993066":"2026-03-08T10:14:31.637587+0000","192.168.123.101:0/3859697181":"2026-03-08T10:14:31.637587+0000","192.168.123.101:6800/1072916292":"2026-03-08T10:14:13.780842+0000","192.168.123.101:0/748929155":"2026-03-08T10:14:31.637587+0000","192.168.123.101:0/783816649":"2026-03-08T10:14:13.780842+0000","192.168.123.101:6801/1072916292":"2026-03-08T10:14:13.780842+0000","192.168.123.101:0/2002072881":"2026-03-08T10:14:13.780842+0000","192.168.123.101:0/3516706839":"2026-03-08T10:14:13.780842+0000"},"range_blocklist":{},"erasure_code_profiles":{"default":{"crush-failure-domain":"osd","k":"2","m":"1","plugin":"jerasure","technique":"reed_sol_van"}},"removed_snaps_queue":[],"new_removed_snaps":[],"new_purged_snaps":[],"crush_node_flags":{},"device_class_flags":{},"stretch_mode":{"stretch_mode_enabled":false,"stretch_bucket_count":0,"degraded_stretch_mode":0,"recovering_stretch_mode":0,"stretch_mode_bucket":0}} 2026-03-07T10:16:23.508 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:23 vm04 ceph-mon[49935]: from='client.? 192.168.123.101:0/651246273' entity='client.admin' cmd=[{"prefix": "mgr dump", "format": "json"}]: dispatch 2026-03-07T10:16:23.512 INFO:tasks.cephadm.ceph_manager.ceph:all up! 2026-03-07T10:16:23.512 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid 3fd6e214-1a0e-11f1-b256-99cfc35f3328 -- ceph osd dump --format=json 2026-03-07T10:16:23.584 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:23 vm07 ceph-mon[68568]: from='client.? 
192.168.123.101:0/651246273' entity='client.admin' cmd=[{"prefix": "mgr dump", "format": "json"}]: dispatch 2026-03-07T10:16:23.701 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/mon.a/config 2026-03-07T10:16:24.045 INFO:teuthology.orchestra.run.vm01.stdout: 2026-03-07T10:16:24.045 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":35,"fsid":"3fd6e214-1a0e-11f1-b256-99cfc35f3328","created":"2026-03-07T10:13:49.200752+0000","modified":"2026-03-07T10:16:21.236435+0000","last_up_change":"2026-03-07T10:16:19.224891+0000","last_in_change":"2026-03-07T10:16:07.902065+0000","flags":"sortbitwise,recovery_deletes,purged_snapdirs,pglog_hardlimit","flags_num":5799936,"flags_set":["pglog_hardlimit","purged_snapdirs","recovery_deletes","sortbitwise"],"crush_version":14,"full_ratio":0.94999998807907104,"backfillfull_ratio":0.89999997615814209,"nearfull_ratio":0.85000002384185791,"cluster_snapshot":"","pool_max":1,"max_osd":6,"require_min_compat_client":"luminous","min_compat_client":"jewel","require_osd_release":"squid","allow_crimson":false,"pools":[{"pool":1,"pool_name":".mgr","create_time":"2026-03-07T10:15:41.708136+0000","flags":1,"flags_names":"hashpspool","type":1,"size":3,"min_size":2,"crush_rule":0,"peering_crush_bucket_count":0,"peering_crush_bucket_target":0,"peering_crush_bucket_barrier":0,"peering_crush_bucket_mandatory_member":2147483647,"is_stretch_pool":false,"object_hash":2,"pg_autoscale_mode":"off","pg_num":1,"pg_placement_num":1,"pg_placement_num_target":1,"pg_num_target":1,"pg_num_pending":1,"last_pg_merge_meta":{"source_pgid":"0.0","ready_epoch":0,"last_epoch_started":0,"last_epoch_clean":0,"source_version":"0'0","target_version":"0'0"},"last_change":"19","last_force_op_resend":"0","last_force_op_resend_prenautilus":"0","last_force_op_resend_preluminous":"0","auid":0,"snap_mode":"selfmanaged","snap_seq":0,"snap_epoch":0,"pool_snaps":[],"removed_snaps":"[]","quota_max_bytes":0,"quota_max_objects":0,"tiers":[],"tier_of":-1,"read_tier":-1,"write_tier":-1,"cache_mode":"none","target_max_bytes":0,"target_max_objects":0,"cache_target_dirty_ratio_micro":400000,"cache_target_dirty_high_ratio_micro":600000,"cache_target_full_ratio_micro":800000,"cache_min_flush_age":0,"cache_min_evict_age":0,"erasure_code_profile":"","hit_set_params":{"type":"none"},"hit_set_period":0,"hit_set_count":0,"use_gmt_hitset":true,"min_read_recency_for_promote":0,"min_write_recency_for_promote":0,"hit_set_grade_decay_rate":0,"hit_set_search_last_n":0,"grade_table":[],"stripe_width":0,"expected_num_objects":0,"fast_read":false,"options":{"pg_num_max":32,"pg_num_min":1},"application_metadata":{"mgr":{}},"read_balance":{"score_type":"Fair 
distribution","score_acting":6,"score_stable":6,"optimal_score":0.5,"raw_score_acting":3,"raw_score_stable":3,"primary_affinity_weighted":1,"average_primary_affinity":1,"average_primary_affinity_weighted":1}}],"osds":[{"osd":0,"uuid":"2672a018-9301-4f8e-b634-bac5c79f0203","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":8,"up_thru":22,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6802","nonce":4213759478},{"type":"v1","addr":"192.168.123.101:6803","nonce":4213759478}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6804","nonce":4213759478},{"type":"v1","addr":"192.168.123.101:6805","nonce":4213759478}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6808","nonce":4213759478},{"type":"v1","addr":"192.168.123.101:6809","nonce":4213759478}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6806","nonce":4213759478},{"type":"v1","addr":"192.168.123.101:6807","nonce":4213759478}]},"public_addr":"192.168.123.101:6803/4213759478","cluster_addr":"192.168.123.101:6805/4213759478","heartbeat_back_addr":"192.168.123.101:6809/4213759478","heartbeat_front_addr":"192.168.123.101:6807/4213759478","state":["exists","up"]},{"osd":1,"uuid":"930d5f0c-1a5a-4d6c-b82c-8410fdb0227e","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":12,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6810","nonce":2110476213},{"type":"v1","addr":"192.168.123.101:6811","nonce":2110476213}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6812","nonce":2110476213},{"type":"v1","addr":"192.168.123.101:6813","nonce":2110476213}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6816","nonce":2110476213},{"type":"v1","addr":"192.168.123.101:6817","nonce":2110476213}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6814","nonce":2110476213},{"type":"v1","addr":"192.168.123.101:6815","nonce":2110476213}]},"public_addr":"192.168.123.101:6811/2110476213","cluster_addr":"192.168.123.101:6813/2110476213","heartbeat_back_addr":"192.168.123.101:6817/2110476213","heartbeat_front_addr":"192.168.123.101:6815/2110476213","state":["exists","up"]},{"osd":2,"uuid":"9403c18e-0c42-46a6-a584-58f450d0eb95","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":16,"up_thru":17,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6800","nonce":3187441749},{"type":"v1","addr":"192.168.123.104:6801","nonce":3187441749}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6802","nonce":3187441749},{"type":"v1","addr":"192.168.123.104:6803","nonce":3187441749}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6806","nonce":3187441749},{"type":"v1","addr":"192.168.123.104:6807","nonce":3187441749}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6804","nonce":3187441749},{"type":"v1","addr":"192.168.123.104:6805","nonce":3187441749}]},"public_addr":"192.168.123.104:6801/3187441749","cluster_addr":"192.168.123.104:6803/3187441749","heartbeat_back_addr":"192.168.123.104:6807/3187441749","heartbeat_front_addr":"192.168.123.104:6805/3187441749","state":["exists","up"]},{"osd":3,"uuid":"155a69d2-3a90-4797-870f-0d55995439d5","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_fr
om":22,"up_thru":34,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6808","nonce":1533780217},{"type":"v1","addr":"192.168.123.104:6809","nonce":1533780217}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6810","nonce":1533780217},{"type":"v1","addr":"192.168.123.104:6811","nonce":1533780217}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6814","nonce":1533780217},{"type":"v1","addr":"192.168.123.104:6815","nonce":1533780217}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6812","nonce":1533780217},{"type":"v1","addr":"192.168.123.104:6813","nonce":1533780217}]},"public_addr":"192.168.123.104:6809/1533780217","cluster_addr":"192.168.123.104:6811/1533780217","heartbeat_back_addr":"192.168.123.104:6815/1533780217","heartbeat_front_addr":"192.168.123.104:6813/1533780217","state":["exists","up"]},{"osd":4,"uuid":"654075eb-fb5d-47cc-abb4-791f434384d1","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":28,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6800","nonce":3003059232},{"type":"v1","addr":"192.168.123.107:6801","nonce":3003059232}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6802","nonce":3003059232},{"type":"v1","addr":"192.168.123.107:6803","nonce":3003059232}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6806","nonce":3003059232},{"type":"v1","addr":"192.168.123.107:6807","nonce":3003059232}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6804","nonce":3003059232},{"type":"v1","addr":"192.168.123.107:6805","nonce":3003059232}]},"public_addr":"192.168.123.107:6801/3003059232","cluster_addr":"192.168.123.107:6803/3003059232","heartbeat_back_addr":"192.168.123.107:6807/3003059232","heartbeat_front_addr":"192.168.123.107:6805/3003059232","state":["exists","up"]},{"osd":5,"uuid":"9dec1d82-be8f-40f2-9e9a-cbb3b644be63","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":33,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6808","nonce":1009376405},{"type":"v1","addr":"192.168.123.107:6809","nonce":1009376405}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6810","nonce":1009376405},{"type":"v1","addr":"192.168.123.107:6811","nonce":1009376405}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6814","nonce":1009376405},{"type":"v1","addr":"192.168.123.107:6815","nonce":1009376405}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6812","nonce":1009376405},{"type":"v1","addr":"192.168.123.107:6813","nonce":1009376405}]},"public_addr":"192.168.123.107:6809/1009376405","cluster_addr":"192.168.123.107:6811/1009376405","heartbeat_back_addr":"192.168.123.107:6815/1009376405","heartbeat_front_addr":"192.168.123.107:6813/1009376405","state":["exists","up"]}],"osd_xinfo":[{"osd":0,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-07T10:15:17.245690+0000","dead_epoch":0},{"osd":1,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-07T10:15:29.032199+0000","dead_epoch":0},{"osd":2,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"
old_weight":0,"last_purged_snaps_scrub":"2026-03-07T10:15:40.274573+0000","dead_epoch":0},{"osd":3,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-07T10:15:53.414722+0000","dead_epoch":0},{"osd":4,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-07T10:16:06.017105+0000","dead_epoch":0},{"osd":5,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-07T10:16:17.380660+0000","dead_epoch":0}],"pg_upmap":[],"pg_upmap_items":[],"pg_upmap_primaries":[],"pg_temp":[],"primary_temp":[],"blocklist":{"192.168.123.101:0/2057860323":"2026-03-08T10:14:31.637587+0000","192.168.123.101:6801/121993066":"2026-03-08T10:14:31.637587+0000","192.168.123.101:6800/121993066":"2026-03-08T10:14:31.637587+0000","192.168.123.101:0/3859697181":"2026-03-08T10:14:31.637587+0000","192.168.123.101:6800/1072916292":"2026-03-08T10:14:13.780842+0000","192.168.123.101:0/748929155":"2026-03-08T10:14:31.637587+0000","192.168.123.101:0/783816649":"2026-03-08T10:14:13.780842+0000","192.168.123.101:6801/1072916292":"2026-03-08T10:14:13.780842+0000","192.168.123.101:0/2002072881":"2026-03-08T10:14:13.780842+0000","192.168.123.101:0/3516706839":"2026-03-08T10:14:13.780842+0000"},"range_blocklist":{},"erasure_code_profiles":{"default":{"crush-failure-domain":"osd","k":"2","m":"1","plugin":"jerasure","technique":"reed_sol_van"}},"removed_snaps_queue":[],"new_removed_snaps":[],"new_purged_snaps":[],"crush_node_flags":{},"device_class_flags":{},"stretch_mode":{"stretch_mode_enabled":false,"stretch_bucket_count":0,"degraded_stretch_mode":0,"recovering_stretch_mode":0,"stretch_mode_bucket":0}} 2026-03-07T10:16:24.139 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid 3fd6e214-1a0e-11f1-b256-99cfc35f3328 -- ceph tell osd.0 flush_pg_stats 2026-03-07T10:16:24.139 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid 3fd6e214-1a0e-11f1-b256-99cfc35f3328 -- ceph tell osd.1 flush_pg_stats 2026-03-07T10:16:24.139 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid 3fd6e214-1a0e-11f1-b256-99cfc35f3328 -- ceph tell osd.2 flush_pg_stats 2026-03-07T10:16:24.139 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid 3fd6e214-1a0e-11f1-b256-99cfc35f3328 -- ceph tell osd.3 flush_pg_stats 2026-03-07T10:16:24.139 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid 3fd6e214-1a0e-11f1-b256-99cfc35f3328 -- ceph tell osd.4 flush_pg_stats 2026-03-07T10:16:24.139 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid 3fd6e214-1a0e-11f1-b256-99cfc35f3328 -- ceph tell osd.5 flush_pg_stats 2026-03-07T10:16:24.295 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:24 vm01 
ceph-mon[49602]: from='client.? 192.168.123.101:0/421422834' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-07T10:16:24.295 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:24 vm01 ceph-mon[49602]: pgmap v81: 1 pgs: 1 remapped+peering; 449 KiB data, 160 MiB used, 120 GiB / 120 GiB avail 2026-03-07T10:16:24.295 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:24 vm01 ceph-mon[49602]: from='client.? 192.168.123.101:0/3658108791' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-07T10:16:24.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:24 vm04 ceph-mon[49935]: from='client.? 192.168.123.101:0/421422834' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-07T10:16:24.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:24 vm04 ceph-mon[49935]: pgmap v81: 1 pgs: 1 remapped+peering; 449 KiB data, 160 MiB used, 120 GiB / 120 GiB avail 2026-03-07T10:16:24.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:24 vm04 ceph-mon[49935]: from='client.? 192.168.123.101:0/3658108791' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-07T10:16:24.682 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:24 vm07 ceph-mon[68568]: from='client.? 192.168.123.101:0/421422834' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-07T10:16:24.683 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:24 vm07 ceph-mon[68568]: pgmap v81: 1 pgs: 1 remapped+peering; 449 KiB data, 160 MiB used, 120 GiB / 120 GiB avail 2026-03-07T10:16:24.683 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:24 vm07 ceph-mon[68568]: from='client.? 192.168.123.101:0/3658108791' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-07T10:16:25.031 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/mon.a/config 2026-03-07T10:16:25.072 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/mon.a/config 2026-03-07T10:16:25.093 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/mon.a/config 2026-03-07T10:16:25.119 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/mon.a/config 2026-03-07T10:16:25.128 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/mon.a/config 2026-03-07T10:16:25.208 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/mon.a/config 2026-03-07T10:16:25.787 INFO:teuthology.orchestra.run.vm01.stdout:120259084293 2026-03-07T10:16:25.788 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid 3fd6e214-1a0e-11f1-b256-99cfc35f3328 -- ceph osd last-stat-seq osd.4 2026-03-07T10:16:25.947 INFO:teuthology.orchestra.run.vm01.stdout:51539607565 2026-03-07T10:16:25.947 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid 3fd6e214-1a0e-11f1-b256-99cfc35f3328 -- ceph osd last-stat-seq osd.1 2026-03-07T10:16:26.316 INFO:teuthology.orchestra.run.vm01.stdout:94489280520 2026-03-07T10:16:26.316 DEBUG:teuthology.orchestra.run.vm01:> sudo 
/home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid 3fd6e214-1a0e-11f1-b256-99cfc35f3328 -- ceph osd last-stat-seq osd.3 2026-03-07T10:16:26.333 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/mon.a/config 2026-03-07T10:16:26.431 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:26 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:26.431 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:26 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:26.431 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:26 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:26.431 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:26 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:26.431 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:26 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:26.431 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:26 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:26.432 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:26 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:26.432 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:26 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:26.432 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:26 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:26.432 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:26 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:26.432 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:26 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:26.432 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:26 vm07 ceph-mon[68568]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:26.504 INFO:teuthology.orchestra.run.vm01.stdout:141733920771 2026-03-07T10:16:26.504 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid 3fd6e214-1a0e-11f1-b256-99cfc35f3328 -- ceph osd last-stat-seq osd.5 2026-03-07T10:16:26.521 INFO:teuthology.orchestra.run.vm01.stdout:34359738383 2026-03-07T10:16:26.521 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid 3fd6e214-1a0e-11f1-b256-99cfc35f3328 -- ceph osd last-stat-seq osd.0 2026-03-07T10:16:26.540 INFO:teuthology.orchestra.run.vm01.stdout:68719476746 2026-03-07T10:16:26.541 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid 3fd6e214-1a0e-11f1-b256-99cfc35f3328 -- ceph osd last-stat-seq osd.2 2026-03-07T10:16:26.599 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/mon.a/config 2026-03-07T10:16:26.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:26 vm04 
ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:26.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:26 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:26.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:26 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:26.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:26 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:26.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:26 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:26.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:26 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:26.776 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/mon.a/config 2026-03-07T10:16:27.009 INFO:teuthology.orchestra.run.vm01.stdout:120259084293 2026-03-07T10:16:27.084 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/mon.a/config 2026-03-07T10:16:27.149 INFO:tasks.cephadm.ceph_manager.ceph:need seq 120259084293 got 120259084293 for osd.4 2026-03-07T10:16:27.150 DEBUG:teuthology.parallel:result is None 2026-03-07T10:16:27.201 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:27 vm01 ceph-mon[49602]: pgmap v82: 1 pgs: 1 active+clean; 449 KiB data, 160 MiB used, 120 GiB / 120 GiB avail; 70 KiB/s, 0 objects/s recovering 2026-03-07T10:16:27.202 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:27 vm01 ceph-mon[49602]: from='client.? 192.168.123.101:0/3598672924' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 4}]: dispatch 2026-03-07T10:16:27.345 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/mon.a/config 2026-03-07T10:16:27.412 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/mon.a/config 2026-03-07T10:16:27.497 INFO:teuthology.orchestra.run.vm01.stdout:51539607564 2026-03-07T10:16:27.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:27 vm04 ceph-mon[49935]: pgmap v82: 1 pgs: 1 active+clean; 449 KiB data, 160 MiB used, 120 GiB / 120 GiB avail; 70 KiB/s, 0 objects/s recovering 2026-03-07T10:16:27.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:27 vm04 ceph-mon[49935]: from='client.? 192.168.123.101:0/3598672924' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 4}]: dispatch 2026-03-07T10:16:27.614 INFO:teuthology.orchestra.run.vm01.stdout:94489280519 2026-03-07T10:16:27.682 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:27 vm07 ceph-mon[68568]: pgmap v82: 1 pgs: 1 active+clean; 449 KiB data, 160 MiB used, 120 GiB / 120 GiB avail; 70 KiB/s, 0 objects/s recovering 2026-03-07T10:16:27.682 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:27 vm07 ceph-mon[68568]: from='client.? 
192.168.123.101:0/3598672924' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 4}]: dispatch 2026-03-07T10:16:27.715 INFO:tasks.cephadm.ceph_manager.ceph:need seq 51539607565 got 51539607564 for osd.1 2026-03-07T10:16:27.730 INFO:tasks.cephadm.ceph_manager.ceph:need seq 94489280520 got 94489280519 for osd.3 2026-03-07T10:16:27.927 INFO:teuthology.orchestra.run.vm01.stdout:141733920770 2026-03-07T10:16:28.010 INFO:tasks.cephadm.ceph_manager.ceph:need seq 141733920771 got 141733920770 for osd.5 2026-03-07T10:16:28.057 INFO:teuthology.orchestra.run.vm01.stdout:34359738382 2026-03-07T10:16:28.079 INFO:teuthology.orchestra.run.vm01.stdout:68719476745 2026-03-07T10:16:28.136 INFO:tasks.cephadm.ceph_manager.ceph:need seq 34359738383 got 34359738382 for osd.0 2026-03-07T10:16:28.161 INFO:tasks.cephadm.ceph_manager.ceph:need seq 68719476746 got 68719476745 for osd.2 2026-03-07T10:16:28.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:28 vm01 ceph-mon[49602]: from='client.? 192.168.123.101:0/2765121272' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 1}]: dispatch 2026-03-07T10:16:28.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:28 vm01 ceph-mon[49602]: from='client.? 192.168.123.101:0/1143660640' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 3}]: dispatch 2026-03-07T10:16:28.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:28 vm01 ceph-mon[49602]: pgmap v83: 1 pgs: 1 active+clean; 449 KiB data, 160 MiB used, 120 GiB / 120 GiB avail; 56 KiB/s, 0 objects/s recovering 2026-03-07T10:16:28.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:28 vm01 ceph-mon[49602]: from='client.? 192.168.123.101:0/2316057612' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 5}]: dispatch 2026-03-07T10:16:28.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:28 vm01 ceph-mon[49602]: from='client.? 192.168.123.101:0/4180355682' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 0}]: dispatch 2026-03-07T10:16:28.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:28 vm01 ceph-mon[49602]: from='client.? 192.168.123.101:0/721791311' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 2}]: dispatch 2026-03-07T10:16:28.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:28 vm04 ceph-mon[49935]: from='client.? 192.168.123.101:0/2765121272' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 1}]: dispatch 2026-03-07T10:16:28.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:28 vm04 ceph-mon[49935]: from='client.? 192.168.123.101:0/1143660640' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 3}]: dispatch 2026-03-07T10:16:28.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:28 vm04 ceph-mon[49935]: pgmap v83: 1 pgs: 1 active+clean; 449 KiB data, 160 MiB used, 120 GiB / 120 GiB avail; 56 KiB/s, 0 objects/s recovering 2026-03-07T10:16:28.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:28 vm04 ceph-mon[49935]: from='client.? 192.168.123.101:0/2316057612' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 5}]: dispatch 2026-03-07T10:16:28.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:28 vm04 ceph-mon[49935]: from='client.? 192.168.123.101:0/4180355682' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 0}]: dispatch 2026-03-07T10:16:28.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:28 vm04 ceph-mon[49935]: from='client.? 
192.168.123.101:0/721791311' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 2}]: dispatch 2026-03-07T10:16:28.682 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:28 vm07 ceph-mon[68568]: from='client.? 192.168.123.101:0/2765121272' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 1}]: dispatch 2026-03-07T10:16:28.682 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:28 vm07 ceph-mon[68568]: from='client.? 192.168.123.101:0/1143660640' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 3}]: dispatch 2026-03-07T10:16:28.682 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:28 vm07 ceph-mon[68568]: pgmap v83: 1 pgs: 1 active+clean; 449 KiB data, 160 MiB used, 120 GiB / 120 GiB avail; 56 KiB/s, 0 objects/s recovering 2026-03-07T10:16:28.682 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:28 vm07 ceph-mon[68568]: from='client.? 192.168.123.101:0/2316057612' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 5}]: dispatch 2026-03-07T10:16:28.682 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:28 vm07 ceph-mon[68568]: from='client.? 192.168.123.101:0/4180355682' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 0}]: dispatch 2026-03-07T10:16:28.682 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:28 vm07 ceph-mon[68568]: from='client.? 192.168.123.101:0/721791311' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 2}]: dispatch 2026-03-07T10:16:28.715 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid 3fd6e214-1a0e-11f1-b256-99cfc35f3328 -- ceph osd last-stat-seq osd.1 2026-03-07T10:16:28.731 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid 3fd6e214-1a0e-11f1-b256-99cfc35f3328 -- ceph osd last-stat-seq osd.3 2026-03-07T10:16:28.893 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/mon.a/config 2026-03-07T10:16:28.980 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/mon.a/config 2026-03-07T10:16:29.011 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid 3fd6e214-1a0e-11f1-b256-99cfc35f3328 -- ceph osd last-stat-seq osd.5 2026-03-07T10:16:29.137 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid 3fd6e214-1a0e-11f1-b256-99cfc35f3328 -- ceph osd last-stat-seq osd.0 2026-03-07T10:16:29.162 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid 3fd6e214-1a0e-11f1-b256-99cfc35f3328 -- ceph osd last-stat-seq osd.2 2026-03-07T10:16:29.401 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/mon.a/config 2026-03-07T10:16:29.534 INFO:teuthology.orchestra.run.vm01.stdout:94489280520 2026-03-07T10:16:29.620 INFO:teuthology.orchestra.run.vm01.stdout:51539607565 2026-03-07T10:16:29.838 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:29 vm01 ceph-mon[49602]: from='client.? 
192.168.123.101:0/2239023350' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 3}]: dispatch 2026-03-07T10:16:29.839 INFO:tasks.cephadm.ceph_manager.ceph:need seq 51539607565 got 51539607565 for osd.1 2026-03-07T10:16:29.839 DEBUG:teuthology.parallel:result is None 2026-03-07T10:16:29.849 INFO:tasks.cephadm.ceph_manager.ceph:need seq 94489280520 got 94489280520 for osd.3 2026-03-07T10:16:29.850 DEBUG:teuthology.parallel:result is None 2026-03-07T10:16:29.850 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:29 vm04 ceph-mon[49935]: from='client.? 192.168.123.101:0/2239023350' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 3}]: dispatch 2026-03-07T10:16:29.855 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/mon.a/config 2026-03-07T10:16:29.890 INFO:teuthology.orchestra.run.vm01.stdout:141733920771 2026-03-07T10:16:29.932 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:29 vm07 ceph-mon[68568]: from='client.? 192.168.123.101:0/2239023350' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 3}]: dispatch 2026-03-07T10:16:29.962 INFO:tasks.cephadm.ceph_manager.ceph:need seq 141733920771 got 141733920771 for osd.5 2026-03-07T10:16:29.962 DEBUG:teuthology.parallel:result is None 2026-03-07T10:16:29.980 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/mon.a/config 2026-03-07T10:16:30.257 INFO:teuthology.orchestra.run.vm01.stdout:34359738383 2026-03-07T10:16:30.313 INFO:tasks.cephadm.ceph_manager.ceph:need seq 34359738383 got 34359738383 for osd.0 2026-03-07T10:16:30.314 DEBUG:teuthology.parallel:result is None 2026-03-07T10:16:30.325 INFO:teuthology.orchestra.run.vm01.stdout:68719476747 2026-03-07T10:16:30.373 INFO:tasks.cephadm.ceph_manager.ceph:need seq 68719476746 got 68719476747 for osd.2 2026-03-07T10:16:30.373 DEBUG:teuthology.parallel:result is None 2026-03-07T10:16:30.373 INFO:tasks.cephadm.ceph_manager.ceph:waiting for clean 2026-03-07T10:16:30.373 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid 3fd6e214-1a0e-11f1-b256-99cfc35f3328 -- ceph pg dump --format=json 2026-03-07T10:16:30.583 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/mon.a/config 2026-03-07T10:16:30.610 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:30 vm01 ceph-mon[49602]: from='client.? 192.168.123.101:0/1568252954' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 1}]: dispatch 2026-03-07T10:16:30.610 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:30 vm01 ceph-mon[49602]: pgmap v84: 1 pgs: 1 active+clean; 449 KiB data, 160 MiB used, 120 GiB / 120 GiB avail; 47 KiB/s, 0 objects/s recovering 2026-03-07T10:16:30.610 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:30 vm01 ceph-mon[49602]: from='client.? 192.168.123.101:0/2246767793' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 5}]: dispatch 2026-03-07T10:16:30.610 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:30 vm01 ceph-mon[49602]: from='client.? 192.168.123.101:0/3451844082' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 0}]: dispatch 2026-03-07T10:16:30.610 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:30 vm01 ceph-mon[49602]: from='client.? 
192.168.123.101:0/2196348490' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 2}]: dispatch 2026-03-07T10:16:30.850 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:30 vm04 ceph-mon[49935]: from='client.? 192.168.123.101:0/1568252954' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 1}]: dispatch 2026-03-07T10:16:30.850 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:30 vm04 ceph-mon[49935]: pgmap v84: 1 pgs: 1 active+clean; 449 KiB data, 160 MiB used, 120 GiB / 120 GiB avail; 47 KiB/s, 0 objects/s recovering 2026-03-07T10:16:30.850 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:30 vm04 ceph-mon[49935]: from='client.? 192.168.123.101:0/2246767793' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 5}]: dispatch 2026-03-07T10:16:30.850 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:30 vm04 ceph-mon[49935]: from='client.? 192.168.123.101:0/3451844082' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 0}]: dispatch 2026-03-07T10:16:30.850 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:30 vm04 ceph-mon[49935]: from='client.? 192.168.123.101:0/2196348490' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 2}]: dispatch 2026-03-07T10:16:30.907 INFO:teuthology.orchestra.run.vm01.stdout: 2026-03-07T10:16:30.908 INFO:teuthology.orchestra.run.vm01.stderr:dumped all 2026-03-07T10:16:30.932 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:30 vm07 ceph-mon[68568]: from='client.? 192.168.123.101:0/1568252954' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 1}]: dispatch 2026-03-07T10:16:30.932 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:30 vm07 ceph-mon[68568]: pgmap v84: 1 pgs: 1 active+clean; 449 KiB data, 160 MiB used, 120 GiB / 120 GiB avail; 47 KiB/s, 0 objects/s recovering 2026-03-07T10:16:30.932 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:30 vm07 ceph-mon[68568]: from='client.? 192.168.123.101:0/2246767793' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 5}]: dispatch 2026-03-07T10:16:30.933 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:30 vm07 ceph-mon[68568]: from='client.? 192.168.123.101:0/3451844082' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 0}]: dispatch 2026-03-07T10:16:30.933 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:30 vm07 ceph-mon[68568]: from='client.? 
192.168.123.101:0/2196348490' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 2}]: dispatch 2026-03-07T10:16:30.960 INFO:teuthology.orchestra.run.vm01.stdout:{"pg_ready":true,"pg_map":{"version":84,"stamp":"2026-03-07T10:16:29.675892+0000","last_osdmap_epoch":0,"last_pg_scan":0,"pg_stats_sum":{"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":46,"num_read_kb":37,"num_write":57,"num_write_kb":584,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":6,"num_bytes_recovered":1377840,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":32,"ondisk_log_size":32,"up":3,"acting":3,"num_store_stats":0},"osd_stats_sum":{"up_from":0,"seq":0,"num_pgs":3,"num_osds":6,"num_per_pool_osds":6,"num_per_pool_omap_osds":3,"kb":125804544,"kb_used":164328,"kb_used_data":3276,"kb_used_omap":9,"kb_used_meta":160886,"kb_avail":125640216,"statfs":{"total":128823853056,"available":128655581184,"internally_reserved":0,"allocated":3354624,"data_stored":2112960,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":9537,"internal_metadata":164747967},"hb_peers":[],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[]},"pg_stats_delta":{"stat_sum":{"num_bytes":0,"num_objects":0,"num_object_clones":0,"num_object_copies":0,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":0,"num_whiteouts":0,"num_read":0,"num_read_kb":0,"num_write":0,"num_write_kb":0,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":459280,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":0,"ondisk_log_size":0,"up":0,"acting":0,"num_store_stats":0,"stamp_delta":"9.444912"},"pg_stats":[{"pgid":"1.0","version":"18'32","reported_seq":57,"reported_epoch":3
5,"state":"active+clean","last_fresh":"2026-03-07T10:16:21.552879+0000","last_change":"2026-03-07T10:16:21.552879+0000","last_active":"2026-03-07T10:16:21.552879+0000","last_peered":"2026-03-07T10:16:21.552879+0000","last_clean":"2026-03-07T10:16:21.552879+0000","last_became_active":"2026-03-07T10:16:21.246187+0000","last_became_peered":"2026-03-07T10:16:21.246187+0000","last_unstale":"2026-03-07T10:16:21.552879+0000","last_undegraded":"2026-03-07T10:16:21.552879+0000","last_fullsized":"2026-03-07T10:16:21.552879+0000","mapping_epoch":34,"log_start":"0'0","ondisk_log_start":"0'0","created":17,"last_epoch_clean":35,"parent":"0.0","parent_split_bits":0,"last_scrub":"0'0","last_scrub_stamp":"2026-03-07T10:15:42.459258+0000","last_deep_scrub":"0'0","last_deep_scrub_stamp":"2026-03-07T10:15:42.459258+0000","last_clean_scrub_stamp":"2026-03-07T10:15:42.459258+0000","objects_scrubbed":0,"log_size":32,"log_dups_size":0,"ondisk_log_size":32,"stats_invalid":false,"dirty_stats_invalid":false,"omap_stats_invalid":false,"hitset_stats_invalid":false,"hitset_bytes_stats_invalid":false,"pin_stats_invalid":false,"manifest_stats_invalid":false,"snaptrimq_len":0,"last_scrub_duration":0,"scrub_schedule":"periodic scrub scheduled @ 2026-03-08T21:35:47.426302+0000","scrub_duration":0,"objects_trimmed":0,"snaptrim_duration":0,"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":46,"num_read_kb":37,"num_write":57,"num_write_kb":584,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":6,"num_bytes_recovered":1377840,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"up":[3,5,2],"acting":[3,5,2],"avail_no_missing":[],"object_location_counts":[],"blocked_by":[],"up_primary":3,"acting_primary":3,"purged_snaps":[]}],"pool_stats":[{"poolid":1,"num_pg":1,"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":46,"num_read_kb":37,"num_write":57,"num_write_kb":584,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":6,"num_bytes_recovered":1377840,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":2314240,"data_stored":2296400,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":
32,"ondisk_log_size":32,"up":3,"acting":3,"num_store_stats":6}],"osd_stats":[{"osd":5,"up_from":33,"seq":141733920772,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":27612,"kb_used_data":772,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20939812,"statfs":{"total":21470642176,"available":21442367488,"internally_reserved":0,"allocated":790528,"data_stored":581800,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1590,"internal_metadata":27457994},"hb_peers":[0,1,2,3,4],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":4,"up_from":28,"seq":120259084294,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":27160,"kb_used_data":320,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20940264,"statfs":{"total":21470642176,"available":21442830336,"internally_reserved":0,"allocated":327680,"data_stored":122520,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1590,"internal_metadata":27457994},"hb_peers":[0,1,2,3,5],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":3,"up_from":22,"seq":94489280520,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":27612,"kb_used_data":772,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20939812,"statfs":{"total":21470642176,"available":21442367488,"internally_reserved":0,"allocated":790528,"data_stored":581800,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1588,"internal_metadata":27457996},"hb_peers":[0,1,2,4,5],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":2,"up_from":16,"seq":68719476747,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":27616,"kb_used_data":772,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20939808,"statfs":{"total":21470642176,"available":21442363392,"internally_reserved":0,"allocated":790528,"data_stored":581800,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1590,"internal_metadata":27457994},"hb_peers":[0,1,3,4,5],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":1,"up_from":12,"seq":51539607565,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":27160,"kb_used_data":320,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20940264,"statfs":{"total":21470642176,"available":21442830336,"internally_reserved":0,"allocated":327680,"data_stored":122520,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1590,"internal_metadata":27457994},"hb_peers":[0,2,3,4,5],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired"
:0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":0,"up_from":8,"seq":34359738383,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":27168,"kb_used_data":320,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20940256,"statfs":{"total":21470642176,"available":21442822144,"internally_reserved":0,"allocated":327680,"data_stored":122520,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1589,"internal_metadata":27457995},"hb_peers":[1,2,3,4,5],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]}],"pool_statfs":[{"poolid":1,"osd":0,"total":0,"available":0,"internally_reserved":0,"allocated":462848,"data_stored":459280,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":1,"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":2,"total":0,"available":0,"internally_reserved":0,"allocated":462848,"data_stored":459280,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":3,"total":0,"available":0,"internally_reserved":0,"allocated":462848,"data_stored":459280,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":4,"total":0,"available":0,"internally_reserved":0,"allocated":462848,"data_stored":459280,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":5,"total":0,"available":0,"internally_reserved":0,"allocated":462848,"data_stored":459280,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0}]}} 2026-03-07T10:16:30.960 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid 3fd6e214-1a0e-11f1-b256-99cfc35f3328 -- ceph pg dump --format=json 2026-03-07T10:16:31.134 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/mon.a/config 2026-03-07T10:16:31.465 INFO:teuthology.orchestra.run.vm01.stdout: 2026-03-07T10:16:31.465 INFO:teuthology.orchestra.run.vm01.stderr:dumped all 2026-03-07T10:16:31.516 
INFO:teuthology.orchestra.run.vm01.stdout:{"pg_ready":true,"pg_map":{"version":84,"stamp":"2026-03-07T10:16:29.675892+0000","last_osdmap_epoch":0,"last_pg_scan":0,"pg_stats_sum":{"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":46,"num_read_kb":37,"num_write":57,"num_write_kb":584,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":6,"num_bytes_recovered":1377840,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":32,"ondisk_log_size":32,"up":3,"acting":3,"num_store_stats":0},"osd_stats_sum":{"up_from":0,"seq":0,"num_pgs":3,"num_osds":6,"num_per_pool_osds":6,"num_per_pool_omap_osds":3,"kb":125804544,"kb_used":164328,"kb_used_data":3276,"kb_used_omap":9,"kb_used_meta":160886,"kb_avail":125640216,"statfs":{"total":128823853056,"available":128655581184,"internally_reserved":0,"allocated":3354624,"data_stored":2112960,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":9537,"internal_metadata":164747967},"hb_peers":[],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[]},"pg_stats_delta":{"stat_sum":{"num_bytes":0,"num_objects":0,"num_object_clones":0,"num_object_copies":0,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":0,"num_whiteouts":0,"num_read":0,"num_read_kb":0,"num_write":0,"num_write_kb":0,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":459280,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":0,"ondisk_log_size":0,"up":0,"acting":0,"num_store_stats":0,"stamp_delta":"9.444912"},"pg_stats":[{"pgid":"1.0","version":"18'32","reported_seq":57,"reported_epoch":35,"state":"active+clean","last_fresh":"2026-03-07T10:16:21.552879+0000","last_change":"2026-03-07T10:16:21.552879+0000","last_active"
:"2026-03-07T10:16:21.552879+0000","last_peered":"2026-03-07T10:16:21.552879+0000","last_clean":"2026-03-07T10:16:21.552879+0000","last_became_active":"2026-03-07T10:16:21.246187+0000","last_became_peered":"2026-03-07T10:16:21.246187+0000","last_unstale":"2026-03-07T10:16:21.552879+0000","last_undegraded":"2026-03-07T10:16:21.552879+0000","last_fullsized":"2026-03-07T10:16:21.552879+0000","mapping_epoch":34,"log_start":"0'0","ondisk_log_start":"0'0","created":17,"last_epoch_clean":35,"parent":"0.0","parent_split_bits":0,"last_scrub":"0'0","last_scrub_stamp":"2026-03-07T10:15:42.459258+0000","last_deep_scrub":"0'0","last_deep_scrub_stamp":"2026-03-07T10:15:42.459258+0000","last_clean_scrub_stamp":"2026-03-07T10:15:42.459258+0000","objects_scrubbed":0,"log_size":32,"log_dups_size":0,"ondisk_log_size":32,"stats_invalid":false,"dirty_stats_invalid":false,"omap_stats_invalid":false,"hitset_stats_invalid":false,"hitset_bytes_stats_invalid":false,"pin_stats_invalid":false,"manifest_stats_invalid":false,"snaptrimq_len":0,"last_scrub_duration":0,"scrub_schedule":"periodic scrub scheduled @ 2026-03-08T21:35:47.426302+0000","scrub_duration":0,"objects_trimmed":0,"snaptrim_duration":0,"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":46,"num_read_kb":37,"num_write":57,"num_write_kb":584,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":6,"num_bytes_recovered":1377840,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"up":[3,5,2],"acting":[3,5,2],"avail_no_missing":[],"object_location_counts":[],"blocked_by":[],"up_primary":3,"acting_primary":3,"purged_snaps":[]}],"pool_stats":[{"poolid":1,"num_pg":1,"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":46,"num_read_kb":37,"num_write":57,"num_write_kb":584,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":6,"num_bytes_recovered":1377840,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":2314240,"data_stored":2296400,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":32,"ondisk_log_size":32,"up":3,"acting":3,"num_store_stats":6}],"osd_stats":[{"osd":5,"up_from":33,"seq":141733920772,"num_pgs":1,"nu
m_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":27612,"kb_used_data":772,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20939812,"statfs":{"total":21470642176,"available":21442367488,"internally_reserved":0,"allocated":790528,"data_stored":581800,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1590,"internal_metadata":27457994},"hb_peers":[0,1,2,3,4],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":4,"up_from":28,"seq":120259084294,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":27160,"kb_used_data":320,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20940264,"statfs":{"total":21470642176,"available":21442830336,"internally_reserved":0,"allocated":327680,"data_stored":122520,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1590,"internal_metadata":27457994},"hb_peers":[0,1,2,3,5],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":3,"up_from":22,"seq":94489280520,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":27612,"kb_used_data":772,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20939812,"statfs":{"total":21470642176,"available":21442367488,"internally_reserved":0,"allocated":790528,"data_stored":581800,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1588,"internal_metadata":27457996},"hb_peers":[0,1,2,4,5],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":2,"up_from":16,"seq":68719476747,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":27616,"kb_used_data":772,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20939808,"statfs":{"total":21470642176,"available":21442363392,"internally_reserved":0,"allocated":790528,"data_stored":581800,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1590,"internal_metadata":27457994},"hb_peers":[0,1,3,4,5],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":1,"up_from":12,"seq":51539607565,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":27160,"kb_used_data":320,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20940264,"statfs":{"total":21470642176,"available":21442830336,"internally_reserved":0,"allocated":327680,"data_stored":122520,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1590,"internal_metadata":27457994},"hb_peers":[0,2,3,4,5],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0
,"apply_latency_ns":0},"alerts":[]},{"osd":0,"up_from":8,"seq":34359738383,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":27168,"kb_used_data":320,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20940256,"statfs":{"total":21470642176,"available":21442822144,"internally_reserved":0,"allocated":327680,"data_stored":122520,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1589,"internal_metadata":27457995},"hb_peers":[1,2,3,4,5],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]}],"pool_statfs":[{"poolid":1,"osd":0,"total":0,"available":0,"internally_reserved":0,"allocated":462848,"data_stored":459280,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":1,"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":2,"total":0,"available":0,"internally_reserved":0,"allocated":462848,"data_stored":459280,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":3,"total":0,"available":0,"internally_reserved":0,"allocated":462848,"data_stored":459280,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":4,"total":0,"available":0,"internally_reserved":0,"allocated":462848,"data_stored":459280,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":5,"total":0,"available":0,"internally_reserved":0,"allocated":462848,"data_stored":459280,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0}]}} 2026-03-07T10:16:31.517 INFO:tasks.cephadm.ceph_manager.ceph:clean! 2026-03-07T10:16:31.517 INFO:tasks.ceph:Waiting until ceph cluster ceph is healthy... 
2026-03-07T10:16:31.517 INFO:tasks.cephadm.ceph_manager.ceph:wait_until_healthy 2026-03-07T10:16:31.517 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid 3fd6e214-1a0e-11f1-b256-99cfc35f3328 -- ceph health --format=json 2026-03-07T10:16:31.693 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/mon.a/config 2026-03-07T10:16:31.717 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:31 vm01 ceph-mon[49602]: from='client.14475 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-07T10:16:31.932 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:31 vm07 ceph-mon[68568]: from='client.14475 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-07T10:16:32.037 INFO:teuthology.orchestra.run.vm01.stdout: 2026-03-07T10:16:32.038 INFO:teuthology.orchestra.run.vm01.stdout:{"status":"HEALTH_OK","checks":{},"mutes":[]} 2026-03-07T10:16:32.094 INFO:tasks.cephadm.ceph_manager.ceph:wait_until_healthy done 2026-03-07T10:16:32.094 INFO:tasks.cephadm:Setup complete, yielding 2026-03-07T10:16:32.094 INFO:teuthology.run_tasks:Running task cephadm.shell... 2026-03-07T10:16:32.096 INFO:tasks.cephadm:Running commands on role host.a host ubuntu@vm01.local 2026-03-07T10:16:32.096 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3fd6e214-1a0e-11f1-b256-99cfc35f3328 -- bash -c 'set -ex 2026-03-07T10:16:32.096 DEBUG:teuthology.orchestra.run.vm01:> HOSTNAMES=$(ceph orch host ls --format json | jq -r '"'"'.[] | .hostname'"'"') 2026-03-07T10:16:32.096 DEBUG:teuthology.orchestra.run.vm01:> for host in $HOSTNAMES; do 2026-03-07T10:16:32.096 DEBUG:teuthology.orchestra.run.vm01:> # find the hostname for "host.c" which will have no mgr 2026-03-07T10:16:32.096 DEBUG:teuthology.orchestra.run.vm01:> HAS_MGRS=$(ceph orch ps --hostname ${host} --format json | jq '"'"'any(.daemon_type == "mgr")'"'"') 2026-03-07T10:16:32.096 DEBUG:teuthology.orchestra.run.vm01:> if [ "$HAS_MGRS" == "false" ]; then 2026-03-07T10:16:32.096 DEBUG:teuthology.orchestra.run.vm01:> HOST_C="${host}" 2026-03-07T10:16:32.096 DEBUG:teuthology.orchestra.run.vm01:> fi 2026-03-07T10:16:32.096 DEBUG:teuthology.orchestra.run.vm01:> done 2026-03-07T10:16:32.096 DEBUG:teuthology.orchestra.run.vm01:> # One last thing to worry about before draining the host 2026-03-07T10:16:32.096 DEBUG:teuthology.orchestra.run.vm01:> # is that the teuthology test tends to put the explicit 2026-03-07T10:16:32.096 DEBUG:teuthology.orchestra.run.vm01:> # hostnames in the placement for the mon service. 2026-03-07T10:16:32.096 DEBUG:teuthology.orchestra.run.vm01:> # We want to make sure we can drain without providing 2026-03-07T10:16:32.096 DEBUG:teuthology.orchestra.run.vm01:> # --force and there is a check for the host being removed 2026-03-07T10:16:32.096 DEBUG:teuthology.orchestra.run.vm01:> # being listed explicitly in the placements. Therefore, 2026-03-07T10:16:32.096 DEBUG:teuthology.orchestra.run.vm01:> # we should remove it from the mon placement. 
2026-03-07T10:16:32.096 DEBUG:teuthology.orchestra.run.vm01:> ceph orch ls mon --export > mon.yaml 2026-03-07T10:16:32.097 DEBUG:teuthology.orchestra.run.vm01:> sed /"$HOST_C"/d mon.yaml > mon_adjusted.yaml 2026-03-07T10:16:32.097 DEBUG:teuthology.orchestra.run.vm01:> ceph orch apply -i mon_adjusted.yaml 2026-03-07T10:16:32.097 DEBUG:teuthology.orchestra.run.vm01:> # now drain that host 2026-03-07T10:16:32.097 DEBUG:teuthology.orchestra.run.vm01:> ceph orch host drain $HOST_C --zap-osd-devices 2026-03-07T10:16:32.097 DEBUG:teuthology.orchestra.run.vm01:> # wait for drain to complete 2026-03-07T10:16:32.097 DEBUG:teuthology.orchestra.run.vm01:> HOST_C_DAEMONS=$(ceph orch ps --hostname $HOST_C) 2026-03-07T10:16:32.097 DEBUG:teuthology.orchestra.run.vm01:> while [ "$HOST_C_DAEMONS" != "No daemons reported" ]; do 2026-03-07T10:16:32.097 DEBUG:teuthology.orchestra.run.vm01:> sleep 15 2026-03-07T10:16:32.097 DEBUG:teuthology.orchestra.run.vm01:> HOST_C_DAEMONS=$(ceph orch ps --hostname $HOST_C) 2026-03-07T10:16:32.097 DEBUG:teuthology.orchestra.run.vm01:> done 2026-03-07T10:16:32.097 DEBUG:teuthology.orchestra.run.vm01:> # we want to check the ability to remove the host from 2026-03-07T10:16:32.097 DEBUG:teuthology.orchestra.run.vm01:> # the CRUSH map, so we should first verify the host is in 2026-03-07T10:16:32.097 DEBUG:teuthology.orchestra.run.vm01:> # the CRUSH map. 2026-03-07T10:16:32.097 DEBUG:teuthology.orchestra.run.vm01:> ceph osd getcrushmap -o compiled-crushmap 2026-03-07T10:16:32.097 DEBUG:teuthology.orchestra.run.vm01:> crushtool -d compiled-crushmap -o crushmap.txt 2026-03-07T10:16:32.097 DEBUG:teuthology.orchestra.run.vm01:> CRUSH_MAP=$(cat crushmap.txt) 2026-03-07T10:16:32.097 DEBUG:teuthology.orchestra.run.vm01:> if ! grep -q "$HOST_C" <<< "$CRUSH_MAP"; then 2026-03-07T10:16:32.097 DEBUG:teuthology.orchestra.run.vm01:> printf "Expected to see $HOST_C in CRUSH map. Saw:\n\n$CRUSH_MAP" 2026-03-07T10:16:32.097 DEBUG:teuthology.orchestra.run.vm01:> exit 1 2026-03-07T10:16:32.097 DEBUG:teuthology.orchestra.run.vm01:> fi 2026-03-07T10:16:32.097 DEBUG:teuthology.orchestra.run.vm01:> # If the drain was successful, we should be able to remove the 2026-03-07T10:16:32.097 DEBUG:teuthology.orchestra.run.vm01:> # host without force with no issues. 
If there are still daemons 2026-03-07T10:16:32.097 DEBUG:teuthology.orchestra.run.vm01:> # we will get a response telling us to drain the host and a 2026-03-07T10:16:32.097 DEBUG:teuthology.orchestra.run.vm01:> # non-zero return code 2026-03-07T10:16:32.097 DEBUG:teuthology.orchestra.run.vm01:> ceph orch host rm $HOST_C --rm-crush-entry 2026-03-07T10:16:32.097 DEBUG:teuthology.orchestra.run.vm01:> # verify we'"'"'ve successfully removed the host from the CRUSH map 2026-03-07T10:16:32.097 DEBUG:teuthology.orchestra.run.vm01:> sleep 30 2026-03-07T10:16:32.097 DEBUG:teuthology.orchestra.run.vm01:> ceph osd getcrushmap -o compiled-crushmap 2026-03-07T10:16:32.097 DEBUG:teuthology.orchestra.run.vm01:> crushtool -d compiled-crushmap -o crushmap.txt 2026-03-07T10:16:32.097 DEBUG:teuthology.orchestra.run.vm01:> CRUSH_MAP=$(cat crushmap.txt) 2026-03-07T10:16:32.097 DEBUG:teuthology.orchestra.run.vm01:> if grep -q "$HOST_C" <<< "$CRUSH_MAP"; then 2026-03-07T10:16:32.097 DEBUG:teuthology.orchestra.run.vm01:> printf "Saw $HOST_C in CRUSH map after it should have been removed.\n\n$CRUSH_MAP" 2026-03-07T10:16:32.097 DEBUG:teuthology.orchestra.run.vm01:> exit 1 2026-03-07T10:16:32.097 DEBUG:teuthology.orchestra.run.vm01:> fi 2026-03-07T10:16:32.097 DEBUG:teuthology.orchestra.run.vm01:> ' 2026-03-07T10:16:32.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:31 vm04 ceph-mon[49935]: from='client.14475 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-07T10:16:32.272 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/mon.a/config 2026-03-07T10:16:32.355 INFO:teuthology.orchestra.run.vm01.stderr:++ ceph orch host ls --format json 2026-03-07T10:16:32.356 INFO:teuthology.orchestra.run.vm01.stderr:++ jq -r '.[] | .hostname' 2026-03-07T10:16:32.608 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:32 vm01 ceph-mon[49602]: from='client.14481 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-07T10:16:32.626 INFO:teuthology.orchestra.run.vm01.stderr:+ HOSTNAMES='vm01 2026-03-07T10:16:32.626 INFO:teuthology.orchestra.run.vm01.stderr:vm04 2026-03-07T10:16:32.626 INFO:teuthology.orchestra.run.vm01.stderr:vm07' 2026-03-07T10:16:32.626 INFO:teuthology.orchestra.run.vm01.stderr:+ for host in $HOSTNAMES 2026-03-07T10:16:32.626 INFO:teuthology.orchestra.run.vm01.stderr:++ ceph orch ps --hostname vm01 --format json 2026-03-07T10:16:32.626 INFO:teuthology.orchestra.run.vm01.stderr:++ jq 'any(.daemon_type == "mgr")' 2026-03-07T10:16:32.795 INFO:teuthology.orchestra.run.vm01.stderr:+ HAS_MGRS=true 2026-03-07T10:16:32.795 INFO:teuthology.orchestra.run.vm01.stderr:+ '[' true == false ']' 2026-03-07T10:16:32.795 INFO:teuthology.orchestra.run.vm01.stderr:+ for host in $HOSTNAMES 2026-03-07T10:16:32.795 INFO:teuthology.orchestra.run.vm01.stderr:++ ceph orch ps --hostname vm04 --format json 2026-03-07T10:16:32.795 INFO:teuthology.orchestra.run.vm01.stderr:++ jq 'any(.daemon_type == "mgr")' 2026-03-07T10:16:32.932 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:32 vm07 ceph-mon[68568]: from='client.14481 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-07T10:16:32.932 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:32 vm07 ceph-mon[68568]: pgmap v85: 1 pgs: 1 active+clean; 449 KiB data, 160 MiB used, 120 GiB / 120 GiB avail; 43 KiB/s, 0 objects/s recovering 
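The shell trace above shows the drain script resolving HOST_C: `ceph orch ps --hostname <host> --format json | jq 'any(.daemon_type == "mgr")'` returns false only for vm07, so vm07 becomes the drain target, and the exported mon spec is re-applied with the vm07 line deleted. As the script's own comments note, cephadm checks whether a host being removed is still named explicitly in a placement, so editing the spec first is what lets the later drain and `host rm` succeed without `--force`. A quick, illustrative way to confirm the adjusted spec no longer pins the host (not part of the test):

    # the re-applied mon spec should no longer mention the drain target
    ceph orch ls mon --export | grep vm07 || echo "vm07 not pinned in mon placement"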
2026-03-07T10:16:32.932 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:32 vm07 ceph-mon[68568]: from='client.? 192.168.123.101:0/4149381190' entity='client.admin' cmd=[{"prefix": "health", "format": "json"}]: dispatch 2026-03-07T10:16:32.962 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:32 vm01 ceph-mon[49602]: pgmap v85: 1 pgs: 1 active+clean; 449 KiB data, 160 MiB used, 120 GiB / 120 GiB avail; 43 KiB/s, 0 objects/s recovering 2026-03-07T10:16:32.962 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:32 vm01 ceph-mon[49602]: from='client.? 192.168.123.101:0/4149381190' entity='client.admin' cmd=[{"prefix": "health", "format": "json"}]: dispatch 2026-03-07T10:16:32.963 INFO:teuthology.orchestra.run.vm01.stderr:+ HAS_MGRS=true 2026-03-07T10:16:32.963 INFO:teuthology.orchestra.run.vm01.stderr:+ '[' true == false ']' 2026-03-07T10:16:32.963 INFO:teuthology.orchestra.run.vm01.stderr:+ for host in $HOSTNAMES 2026-03-07T10:16:32.963 INFO:teuthology.orchestra.run.vm01.stderr:++ jq 'any(.daemon_type == "mgr")' 2026-03-07T10:16:32.963 INFO:teuthology.orchestra.run.vm01.stderr:++ ceph orch ps --hostname vm07 --format json 2026-03-07T10:16:33.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:32 vm04 ceph-mon[49935]: from='client.14481 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-07T10:16:33.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:32 vm04 ceph-mon[49935]: pgmap v85: 1 pgs: 1 active+clean; 449 KiB data, 160 MiB used, 120 GiB / 120 GiB avail; 43 KiB/s, 0 objects/s recovering 2026-03-07T10:16:33.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:32 vm04 ceph-mon[49935]: from='client.? 192.168.123.101:0/4149381190' entity='client.admin' cmd=[{"prefix": "health", "format": "json"}]: dispatch 2026-03-07T10:16:33.135 INFO:teuthology.orchestra.run.vm01.stderr:+ HAS_MGRS=false 2026-03-07T10:16:33.135 INFO:teuthology.orchestra.run.vm01.stderr:+ '[' false == false ']' 2026-03-07T10:16:33.135 INFO:teuthology.orchestra.run.vm01.stderr:+ HOST_C=vm07 2026-03-07T10:16:33.135 INFO:teuthology.orchestra.run.vm01.stderr:+ ceph orch ls mon --export 2026-03-07T10:16:33.308 INFO:teuthology.orchestra.run.vm01.stderr:+ sed /vm07/d mon.yaml 2026-03-07T10:16:33.309 INFO:teuthology.orchestra.run.vm01.stderr:+ ceph orch apply -i mon_adjusted.yaml 2026-03-07T10:16:33.474 INFO:teuthology.orchestra.run.vm01.stdout:Scheduled mon update... 
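"Scheduled mon update..." confirms the adjusted spec was accepted; the journalctl lines further down show cephadm converging on it by removing mon.c from the monmap and tearing down its container on vm07. Two illustrative commands for watching that convergence from the admin host (assuming the usual cephadm shell):

    # mon service status and surviving quorum after the placement change
    ceph orch ls mon
    ceph mon stat    # quorum should list only mons a and b once mon.c is gone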
2026-03-07T10:16:33.487 INFO:teuthology.orchestra.run.vm01.stderr:+ ceph orch host drain vm07 --zap-osd-devices 2026-03-07T10:16:33.659 INFO:teuthology.orchestra.run.vm01.stdout:Scheduled to remove the following daemons from host 'vm07' 2026-03-07T10:16:33.659 INFO:teuthology.orchestra.run.vm01.stdout:type id 2026-03-07T10:16:33.659 INFO:teuthology.orchestra.run.vm01.stdout:-------------------- --------------- 2026-03-07T10:16:33.659 INFO:teuthology.orchestra.run.vm01.stdout:agent vm07 2026-03-07T10:16:33.659 INFO:teuthology.orchestra.run.vm01.stdout:mon c 2026-03-07T10:16:33.659 INFO:teuthology.orchestra.run.vm01.stdout:osd 4 2026-03-07T10:16:33.659 INFO:teuthology.orchestra.run.vm01.stdout:osd 5 2026-03-07T10:16:33.672 INFO:teuthology.orchestra.run.vm01.stderr:++ ceph orch ps --hostname vm07 2026-03-07T10:16:33.817 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:33 vm07 podman[77129]: 2026-03-07 10:16:33.526523729 +0000 UTC m=+0.016266787 container died f89ae0cf3c5799e271bb895af9c8edacc4910ca61c480810f420de8c65a193f4 (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mon-c, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git) 2026-03-07T10:16:33.817 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:33 vm07 podman[77129]: 2026-03-07 10:16:33.543344922 +0000 UTC m=+0.033087980 container remove f89ae0cf3c5799e271bb895af9c8edacc4910ca61c480810f420de8c65a193f4 (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mon-c, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default) 2026-03-07T10:16:33.817 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:33 vm07 systemd[1]: ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328@mon.c.service: Deactivated successfully. 2026-03-07T10:16:33.817 INFO:journalctl@ceph.mon.c.vm07.stdout:Mar 07 10:16:33 vm07 systemd[1]: ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328@mon.c.service: Consumed 1.569s CPU time. 
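`ceph orch host drain vm07 --zap-osd-devices` schedules every daemon on the host for removal (the agent, mon.c, osd.4 and osd.5 listed above) and, because of `--zap-osd-devices`, zaps the OSD devices once the OSDs are purged. Besides the script's own `ceph orch ps --hostname` loop, progress can also be followed through the orchestrator's OSD removal queue; a sketch, for illustration:

    # OSD removal/zap progress and remaining daemons on the drained host
    ceph orch osd rm status
    ceph orch ps --hostname vm07   # the test loops until this prints "No daemons reported"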
2026-03-07T10:16:33.853 INFO:teuthology.orchestra.run.vm01.stderr:+ HOST_C_DAEMONS='NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-07T10:16:33.853 INFO:teuthology.orchestra.run.vm01.stderr:agent.vm07 vm07 running 17s ago 108s - - 2026-03-07T10:16:33.853 INFO:teuthology.orchestra.run.vm01.stderr:mon.c vm07 running (100s) 17s ago 100s 44.5M 2048M 19.2.3-39-g340d3c24fc6 8bccc98d839a f89ae0cf3c57 2026-03-07T10:16:33.853 INFO:teuthology.orchestra.run.vm01.stderr:osd.4 vm07 running (30s) 17s ago 30s 54.5M 1664M 19.2.3-39-g340d3c24fc6 8bccc98d839a bf69212e50bb 2026-03-07T10:16:33.854 INFO:teuthology.orchestra.run.vm01.stderr:osd.5 vm07 running (18s) 17s ago 18s 12.3M 1664M 19.2.3-39-g340d3c24fc6 8bccc98d839a 809d1aaf4e8c ' 2026-03-07T10:16:33.854 INFO:teuthology.orchestra.run.vm01.stderr:+ '[' 'NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-07T10:16:33.854 INFO:teuthology.orchestra.run.vm01.stderr:agent.vm07 vm07 running 17s ago 108s - - 2026-03-07T10:16:33.854 INFO:teuthology.orchestra.run.vm01.stderr:mon.c vm07 running (100s) 17s ago 100s 44.5M 2048M 19.2.3-39-g340d3c24fc6 8bccc98d839a f89ae0cf3c57 2026-03-07T10:16:33.854 INFO:teuthology.orchestra.run.vm01.stderr:osd.4 vm07 running (30s) 17s ago 30s 54.5M 1664M 19.2.3-39-g340d3c24fc6 8bccc98d839a bf69212e50bb 2026-03-07T10:16:33.854 INFO:teuthology.orchestra.run.vm01.stderr:osd.5 vm07 running (18s) 17s ago 18s 12.3M 1664M 19.2.3-39-g340d3c24fc6 8bccc98d839a 809d1aaf4e8c ' '!=' 'No daemons reported' ']' 2026-03-07T10:16:33.854 INFO:teuthology.orchestra.run.vm01.stderr:+ sleep 15 2026-03-07T10:16:34.850 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:34 vm04 ceph-mon[49935]: from='client.24374 -' entity='client.admin' cmd=[{"prefix": "orch ls", "service_type": "mon", "export": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:16:34.850 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:34 vm04 ceph-mon[49935]: from='client.24377 -' entity='client.admin' cmd=[{"prefix": "orch apply", "target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:16:34.850 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:34 vm04 ceph-mon[49935]: Saving service mon spec with placement vm01:192.168.123.101=a;vm04:192.168.123.104=b;count:3 2026-03-07T10:16:34.850 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:34 vm04 ceph-mon[49935]: Safe to remove mon.c: new quorum should be ['a', 'b'] (from ['a', 'b']) 2026-03-07T10:16:34.850 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:34 vm04 ceph-mon[49935]: Removing monitor c from monmap... 
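The mgr log lines above show the safety check cephadm performs before dropping a monitor: it proceeds only because the surviving quorum ['a', 'b'] is still viable, then removes mon.c from the monmap and triggers a new election. An illustrative way to verify the post-removal quorum by hand:

    # surviving monitors after mon.c is removed (expect ["a","b"])
    ceph quorum_status --format=json | jq .quorum_names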
2026-03-07T10:16:34.850 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:34 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd='[{"prefix": "mon rm", "name": "c"}]': finished 2026-03-07T10:16:34.850 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:34 vm04 ceph-mon[49935]: mon.a calling monitor election 2026-03-07T10:16:34.850 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:34 vm04 ceph-mon[49935]: mon.b calling monitor election 2026-03-07T10:16:34.850 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:34 vm04 ceph-mon[49935]: mon.a is new leader, mons a,b in quorum (ranks 0,1) 2026-03-07T10:16:34.850 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:34 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-07T10:16:34.850 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:34 vm04 ceph-mon[49935]: monmap epoch 4 2026-03-07T10:16:34.850 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:34 vm04 ceph-mon[49935]: fsid 3fd6e214-1a0e-11f1-b256-99cfc35f3328 2026-03-07T10:16:34.851 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:34 vm04 ceph-mon[49935]: last_changed 2026-03-07T10:16:33.484368+0000 2026-03-07T10:16:34.851 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:34 vm04 ceph-mon[49935]: created 2026-03-07T10:13:48.241132+0000 2026-03-07T10:16:34.851 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:34 vm04 ceph-mon[49935]: min_mon_release 19 (squid) 2026-03-07T10:16:34.851 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:34 vm04 ceph-mon[49935]: election_strategy: 1 2026-03-07T10:16:34.851 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:34 vm04 ceph-mon[49935]: 0: [v2:192.168.123.101:3300/0,v1:192.168.123.101:6789/0] mon.a 2026-03-07T10:16:34.851 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:34 vm04 ceph-mon[49935]: 1: [v2:192.168.123.104:3300/0,v1:192.168.123.104:6789/0] mon.b 2026-03-07T10:16:34.851 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:34 vm04 ceph-mon[49935]: fsmap 2026-03-07T10:16:34.851 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:34 vm04 ceph-mon[49935]: osdmap e35: 6 total, 6 up, 6 in 2026-03-07T10:16:34.851 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:34 vm04 ceph-mon[49935]: mgrmap e15: a(active, since 2m), standbys: b 2026-03-07T10:16:34.851 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:34 vm04 ceph-mon[49935]: overall HEALTH_OK 2026-03-07T10:16:34.851 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:34 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-07T10:16:34.851 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:34 vm04 ceph-mon[49935]: Removing daemon mon.c from vm07 -- ports [] 2026-03-07T10:16:34.851 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:34 vm04 ceph-mon[49935]: from='client.14526 -' entity='client.admin' cmd=[{"prefix": "orch host drain", "hostname": "vm07", "zap_osd_devices": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:16:34.851 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:34 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:34.851 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:34 vm04 ceph-mon[49935]: Added label _no_schedule to host vm07 2026-03-07T10:16:34.851 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:34 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' 
entity='mgr.a' 2026-03-07T10:16:34.851 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:34 vm04 ceph-mon[49935]: Added label _no_conf_keyring to host vm07 2026-03-07T10:16:34.851 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:34 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd crush tree", "format": "json"}]: dispatch 2026-03-07T10:16:34.851 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:34 vm04 ceph-mon[49935]: osd.4 crush weight is 0.0194854736328125 2026-03-07T10:16:34.851 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:34 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd crush tree", "format": "json"}]: dispatch 2026-03-07T10:16:34.851 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:34 vm04 ceph-mon[49935]: osd.5 crush weight is 0.0194854736328125 2026-03-07T10:16:34.851 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:34 vm04 ceph-mon[49935]: pgmap v86: 1 pgs: 1 active+clean; 449 KiB data, 160 MiB used, 120 GiB / 120 GiB avail; 37 KiB/s, 0 objects/s recovering 2026-03-07T10:16:34.851 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:34 vm04 ceph-mon[49935]: from='client.24387 -' entity='client.admin' cmd=[{"prefix": "orch ps", "hostname": "vm07", "target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:16:34.851 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:34 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:34.851 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:34 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:34.851 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:34 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:16:34.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:34 vm01 ceph-mon[49602]: from='client.24374 -' entity='client.admin' cmd=[{"prefix": "orch ls", "service_type": "mon", "export": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:16:34.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:34 vm01 ceph-mon[49602]: from='client.24377 -' entity='client.admin' cmd=[{"prefix": "orch apply", "target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:16:34.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:34 vm01 ceph-mon[49602]: Saving service mon spec with placement vm01:192.168.123.101=a;vm04:192.168.123.104=b;count:3 2026-03-07T10:16:34.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:34 vm01 ceph-mon[49602]: Safe to remove mon.c: new quorum should be ['a', 'b'] (from ['a', 'b']) 2026-03-07T10:16:34.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:34 vm01 ceph-mon[49602]: Removing monitor c from monmap... 
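As part of the drain, cephadm tags vm07 with the special labels `_no_schedule` and `_no_conf_keyring`, which keep new daemons, configs and keyrings off the host while its OSDs (crush weight ~0.019 each, per the lines above) are purged. The labels are visible in the host listing; an illustrative check (field names assumed from the JSON output of `ceph orch host ls`):

    # labels applied to the drained host
    ceph orch host ls --format json | jq '.[] | select(.hostname == "vm07") | .labels'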
2026-03-07T10:16:34.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:34 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd='[{"prefix": "mon rm", "name": "c"}]': finished 2026-03-07T10:16:34.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:34 vm01 ceph-mon[49602]: mon.a calling monitor election 2026-03-07T10:16:34.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:34 vm01 ceph-mon[49602]: mon.b calling monitor election 2026-03-07T10:16:34.879 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:34 vm01 ceph-mon[49602]: mon.a is new leader, mons a,b in quorum (ranks 0,1) 2026-03-07T10:16:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:34 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-07T10:16:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:34 vm01 ceph-mon[49602]: monmap epoch 4 2026-03-07T10:16:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:34 vm01 ceph-mon[49602]: fsid 3fd6e214-1a0e-11f1-b256-99cfc35f3328 2026-03-07T10:16:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:34 vm01 ceph-mon[49602]: last_changed 2026-03-07T10:16:33.484368+0000 2026-03-07T10:16:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:34 vm01 ceph-mon[49602]: created 2026-03-07T10:13:48.241132+0000 2026-03-07T10:16:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:34 vm01 ceph-mon[49602]: min_mon_release 19 (squid) 2026-03-07T10:16:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:34 vm01 ceph-mon[49602]: election_strategy: 1 2026-03-07T10:16:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:34 vm01 ceph-mon[49602]: 0: [v2:192.168.123.101:3300/0,v1:192.168.123.101:6789/0] mon.a 2026-03-07T10:16:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:34 vm01 ceph-mon[49602]: 1: [v2:192.168.123.104:3300/0,v1:192.168.123.104:6789/0] mon.b 2026-03-07T10:16:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:34 vm01 ceph-mon[49602]: fsmap 2026-03-07T10:16:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:34 vm01 ceph-mon[49602]: osdmap e35: 6 total, 6 up, 6 in 2026-03-07T10:16:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:34 vm01 ceph-mon[49602]: mgrmap e15: a(active, since 2m), standbys: b 2026-03-07T10:16:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:34 vm01 ceph-mon[49602]: overall HEALTH_OK 2026-03-07T10:16:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:34 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-07T10:16:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:34 vm01 ceph-mon[49602]: Removing daemon mon.c from vm07 -- ports [] 2026-03-07T10:16:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:34 vm01 ceph-mon[49602]: from='client.14526 -' entity='client.admin' cmd=[{"prefix": "orch host drain", "hostname": "vm07", "zap_osd_devices": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:16:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:34 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:34 vm01 ceph-mon[49602]: Added label _no_schedule to host vm07 2026-03-07T10:16:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:34 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' 
entity='mgr.a' 2026-03-07T10:16:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:34 vm01 ceph-mon[49602]: Added label _no_conf_keyring to host vm07 2026-03-07T10:16:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:34 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd crush tree", "format": "json"}]: dispatch 2026-03-07T10:16:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:34 vm01 ceph-mon[49602]: osd.4 crush weight is 0.0194854736328125 2026-03-07T10:16:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:34 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd crush tree", "format": "json"}]: dispatch 2026-03-07T10:16:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:34 vm01 ceph-mon[49602]: osd.5 crush weight is 0.0194854736328125 2026-03-07T10:16:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:34 vm01 ceph-mon[49602]: pgmap v86: 1 pgs: 1 active+clean; 449 KiB data, 160 MiB used, 120 GiB / 120 GiB avail; 37 KiB/s, 0 objects/s recovering 2026-03-07T10:16:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:34 vm01 ceph-mon[49602]: from='client.24387 -' entity='client.admin' cmd=[{"prefix": "orch ps", "hostname": "vm07", "target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:16:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:34 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:34 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:34.880 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:34 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:16:35.974 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:35 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:35.974 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:35 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:35.974 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:35 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:35.974 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:35 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:36.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:35 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:36.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:35 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:36.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:35 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:36.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:35 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:36.974 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:36 vm01 ceph-mon[49602]: pgmap v87: 1 pgs: 1 active+clean; 449 KiB data, 160 MiB used, 120 GiB / 120 GiB avail; 37 KiB/s, 0 objects/s recovering 2026-03-07T10:16:36.974 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:36 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' 
entity='mgr.a' 2026-03-07T10:16:36.974 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:36 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:36.974 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:36 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:36.974 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:36 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:16:36.974 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:36 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:16:36.974 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:36 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:36.974 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:36 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:36.974 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:36 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:36.974 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:36 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:36.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:36 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:36.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:36 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:36.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:36 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:36.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:36 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:36.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:36 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd df", "format": "json"}]: dispatch 2026-03-07T10:16:36.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:36 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd df", "format": "json"}]: dispatch 2026-03-07T10:16:36.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:36 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd ok-to-stop", "ids": ["5"]}]: dispatch 2026-03-07T10:16:36.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:36 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd crush reweight", "name": "osd.5", "weight": 0.0}]: dispatch 2026-03-07T10:16:37.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:36 vm04 ceph-mon[49935]: pgmap v87: 1 pgs: 1 active+clean; 449 KiB data, 160 MiB used, 120 GiB / 120 GiB avail; 37 KiB/s, 0 objects/s recovering 2026-03-07T10:16:37.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:36 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:37.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:36 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:37.100 
INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:36 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:37.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:36 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:16:37.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:36 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:16:37.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:36 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:37.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:36 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:37.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:36 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:37.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:36 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:37.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:36 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:37.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:36 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:37.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:36 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:37.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:36 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:37.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:36 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd df", "format": "json"}]: dispatch 2026-03-07T10:16:37.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:36 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd df", "format": "json"}]: dispatch 2026-03-07T10:16:37.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:36 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd ok-to-stop", "ids": ["5"]}]: dispatch 2026-03-07T10:16:37.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:36 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd crush reweight", "name": "osd.5", "weight": 0.0}]: dispatch 2026-03-07T10:16:37.664 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:37 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mon-a[49598]: 2026-03-07T10:16:37.603+0000 7fb675298640 -1 mon.a@0(leader).osd e36 definitely_dead 0 2026-03-07T10:16:37.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:37 vm01 ceph-mon[49602]: Detected new or changed devices on vm07 2026-03-07T10:16:37.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:37 vm01 ceph-mon[49602]: Updating vm01:/etc/ceph/ceph.conf 2026-03-07T10:16:37.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:37 vm01 ceph-mon[49602]: Updating vm04:/etc/ceph/ceph.conf 2026-03-07T10:16:37.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:37 vm01 ceph-mon[49602]: Removing 
vm07:/var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/config/ceph.conf 2026-03-07T10:16:37.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:37 vm01 ceph-mon[49602]: Detected new or changed devices on vm07 2026-03-07T10:16:37.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:37 vm01 ceph-mon[49602]: Removing vm07:/etc/ceph/ceph.client.admin.keyring 2026-03-07T10:16:37.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:37 vm01 ceph-mon[49602]: Removing vm07:/var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/config/ceph.client.admin.keyring 2026-03-07T10:16:37.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:37 vm01 ceph-mon[49602]: Updating vm04:/var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/config/ceph.conf 2026-03-07T10:16:37.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:37 vm01 ceph-mon[49602]: Updating vm01:/var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/config/ceph.conf 2026-03-07T10:16:37.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:37 vm01 ceph-mon[49602]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd df", "format": "json"}]: dispatch 2026-03-07T10:16:37.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:37 vm01 ceph-mon[49602]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd df", "format": "json"}]: dispatch 2026-03-07T10:16:37.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:37 vm01 ceph-mon[49602]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["5"]}]: dispatch 2026-03-07T10:16:37.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:37 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:37.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:37 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:37.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:37 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:37.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:37 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd='[{"prefix": "osd crush reweight", "name": "osd.5", "weight": 0.0}]': finished 2026-03-07T10:16:37.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:37 vm01 ceph-mon[49602]: osdmap e36: 6 total, 6 up, 6 in 2026-03-07T10:16:37.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:37 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd df", "format": "json"}]: dispatch 2026-03-07T10:16:37.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:37 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd safe-to-destroy", "ids": ["4"]}]: dispatch 2026-03-07T10:16:37.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:37 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd down", "ids": ["4"]}]: dispatch 2026-03-07T10:16:38.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:37 vm04 ceph-mon[49935]: Detected new or changed devices on vm07 2026-03-07T10:16:38.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:37 vm04 ceph-mon[49935]: Updating vm01:/etc/ceph/ceph.conf 2026-03-07T10:16:38.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:37 vm04 ceph-mon[49935]: Updating vm04:/etc/ceph/ceph.conf 2026-03-07T10:16:38.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:37 vm04 ceph-mon[49935]: Removing 
vm07:/var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/config/ceph.conf 2026-03-07T10:16:38.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:37 vm04 ceph-mon[49935]: Detected new or changed devices on vm07 2026-03-07T10:16:38.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:37 vm04 ceph-mon[49935]: Removing vm07:/etc/ceph/ceph.client.admin.keyring 2026-03-07T10:16:38.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:37 vm04 ceph-mon[49935]: Removing vm07:/var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/config/ceph.client.admin.keyring 2026-03-07T10:16:38.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:37 vm04 ceph-mon[49935]: Updating vm04:/var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/config/ceph.conf 2026-03-07T10:16:38.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:37 vm04 ceph-mon[49935]: Updating vm01:/var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/config/ceph.conf 2026-03-07T10:16:38.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:37 vm04 ceph-mon[49935]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd df", "format": "json"}]: dispatch 2026-03-07T10:16:38.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:37 vm04 ceph-mon[49935]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd df", "format": "json"}]: dispatch 2026-03-07T10:16:38.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:37 vm04 ceph-mon[49935]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["5"]}]: dispatch 2026-03-07T10:16:38.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:37 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:38.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:37 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:38.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:37 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:38.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:37 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd='[{"prefix": "osd crush reweight", "name": "osd.5", "weight": 0.0}]': finished 2026-03-07T10:16:38.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:37 vm04 ceph-mon[49935]: osdmap e36: 6 total, 6 up, 6 in 2026-03-07T10:16:38.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:37 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd df", "format": "json"}]: dispatch 2026-03-07T10:16:38.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:37 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd safe-to-destroy", "ids": ["4"]}]: dispatch 2026-03-07T10:16:38.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:37 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd down", "ids": ["4"]}]: dispatch 2026-03-07T10:16:38.862 INFO:journalctl@ceph.osd.4.vm07.stdout:Mar 07 10:16:38 vm07 systemd[1]: Stopping Ceph osd.4 for 3fd6e214-1a0e-11f1-b256-99cfc35f3328... 
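Before stopping the OSDs on the drained host, the orchestrator reweights them to 0 and runs the ok-to-stop / safe-to-destroy checks visible above. A minimal manual sketch of those pre-removal checks (illustrative only; cephadm performs them automatically during `ceph orch host drain`) would be:

    # move the OSD out of the data placement first
    ceph osd crush reweight osd.5 0
    # verify that stopping / destroying the OSDs will not leave PGs under-replicated
    ceph osd ok-to-stop 5
    ceph osd safe-to-destroy 4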
2026-03-07T10:16:38.862 INFO:journalctl@ceph.osd.4.vm07.stdout:Mar 07 10:16:38 vm07 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-osd-4[72943]: 2026-03-07T10:16:38.830+0000 7fefd9177640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.4 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-07T10:16:38.862 INFO:journalctl@ceph.osd.4.vm07.stdout:Mar 07 10:16:38 vm07 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-osd-4[72943]: 2026-03-07T10:16:38.830+0000 7fefd9177640 -1 osd.4 36 *** Got signal Terminated *** 2026-03-07T10:16:38.862 INFO:journalctl@ceph.osd.4.vm07.stdout:Mar 07 10:16:38 vm07 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-osd-4[72943]: 2026-03-07T10:16:38.830+0000 7fefd9177640 -1 osd.4 36 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-07T10:16:38.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:38 vm01 ceph-mon[49602]: osd.5 weight is now 0.0 2026-03-07T10:16:38.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:38 vm01 ceph-mon[49602]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd df", "format": "json"}]: dispatch 2026-03-07T10:16:38.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:38 vm01 ceph-mon[49602]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd safe-to-destroy", "ids": ["4"]}]: dispatch 2026-03-07T10:16:38.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:38 vm01 ceph-mon[49602]: pgmap v89: 1 pgs: 1 active+clean; 449 KiB data, 160 MiB used, 120 GiB / 120 GiB avail 2026-03-07T10:16:38.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:38 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:38.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:38 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:38.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:38 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:38.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:38 vm01 ceph-mon[49602]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-07T10:16:38.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:38 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd='[{"prefix": "osd down", "ids": ["4"]}]': finished 2026-03-07T10:16:38.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:38 vm01 ceph-mon[49602]: osdmap e37: 6 total, 5 up, 6 in 2026-03-07T10:16:38.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:38 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:39.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:38 vm04 ceph-mon[49935]: osd.5 weight is now 0.0 2026-03-07T10:16:39.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:38 vm04 ceph-mon[49935]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd df", "format": "json"}]: dispatch 2026-03-07T10:16:39.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:38 vm04 ceph-mon[49935]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd safe-to-destroy", "ids": ["4"]}]: dispatch 2026-03-07T10:16:39.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:38 vm04 ceph-mon[49935]: pgmap v89: 1 pgs: 1 active+clean; 449 KiB data, 160 MiB used, 120 GiB / 120 GiB avail 2026-03-07T10:16:39.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:38 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:39.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:38 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:39.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:38 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:39.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:38 vm04 ceph-mon[49935]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-07T10:16:39.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:38 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd='[{"prefix": "osd down", "ids": ["4"]}]': finished 2026-03-07T10:16:39.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:38 vm04 ceph-mon[49935]: osdmap e37: 6 total, 5 up, 6 in 2026-03-07T10:16:39.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:38 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:39.138 INFO:journalctl@ceph.osd.4.vm07.stdout:Mar 07 10:16:38 vm07 podman[80840]: 2026-03-07 10:16:38.862721199 +0000 UTC m=+0.047040195 container died bf69212e50bba06abb9c3708b8e0eb134e4f40c417d08751afb070e48c08664f (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-osd-4, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8) 2026-03-07T10:16:39.138 INFO:journalctl@ceph.osd.4.vm07.stdout:Mar 07 10:16:38 vm07 podman[80840]: 2026-03-07 10:16:38.897403462 +0000 UTC m=+0.081722458 container remove bf69212e50bba06abb9c3708b8e0eb134e4f40c417d08751afb070e48c08664f (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-osd-4, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/) 2026-03-07T10:16:39.138 INFO:journalctl@ceph.osd.4.vm07.stdout:Mar 07 10:16:38 vm07 bash[80840]: ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-osd-4 2026-03-07T10:16:39.138 INFO:journalctl@ceph.osd.4.vm07.stdout:Mar 07 10:16:39 vm07 podman[80905]: 2026-03-07 10:16:39.045662723 +0000 UTC m=+0.017776934 container create 1eb4f7377c5beaa16d78014bab8f287444d16e504afe5b13db7c57868bc8d32f (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-osd-4-deactivate, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, 
CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default) 2026-03-07T10:16:39.138 INFO:journalctl@ceph.osd.4.vm07.stdout:Mar 07 10:16:39 vm07 podman[80905]: 2026-03-07 10:16:39.090117066 +0000 UTC m=+0.062231287 container init 1eb4f7377c5beaa16d78014bab8f287444d16e504afe5b13db7c57868bc8d32f (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-osd-4-deactivate, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/) 2026-03-07T10:16:39.138 INFO:journalctl@ceph.osd.4.vm07.stdout:Mar 07 10:16:39 vm07 podman[80905]: 2026-03-07 10:16:39.093374031 +0000 UTC m=+0.065488242 container start 1eb4f7377c5beaa16d78014bab8f287444d16e504afe5b13db7c57868bc8d32f (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-osd-4-deactivate, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6) 2026-03-07T10:16:39.138 INFO:journalctl@ceph.osd.4.vm07.stdout:Mar 07 10:16:39 vm07 podman[80905]: 2026-03-07 10:16:39.099995285 +0000 UTC m=+0.072109496 container attach 1eb4f7377c5beaa16d78014bab8f287444d16e504afe5b13db7c57868bc8d32f (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-osd-4-deactivate, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9) 2026-03-07T10:16:39.138 INFO:journalctl@ceph.osd.4.vm07.stdout:Mar 07 10:16:39 vm07 podman[80905]: 2026-03-07 10:16:39.039055677 +0000 UTC m=+0.011169897 image pull 8bccc98d839aa18345ec1336292d0452ca331737e49f12524f635044dcabcfe1 harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0 2026-03-07T10:16:39.932 INFO:journalctl@ceph.osd.4.vm07.stdout:Mar 07 10:16:39 vm07 podman[80905]: 2026-03-07 10:16:39.516693643 +0000 UTC m=+0.488807854 container died 1eb4f7377c5beaa16d78014bab8f287444d16e504afe5b13db7c57868bc8d32f (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-osd-4-deactivate, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8) 2026-03-07T10:16:39.932 
INFO:journalctl@ceph.osd.4.vm07.stdout:Mar 07 10:16:39 vm07 podman[80905]: 2026-03-07 10:16:39.541416486 +0000 UTC m=+0.513530697 container remove 1eb4f7377c5beaa16d78014bab8f287444d16e504afe5b13db7c57868bc8d32f (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-osd-4-deactivate, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8) 2026-03-07T10:16:39.932 INFO:journalctl@ceph.osd.4.vm07.stdout:Mar 07 10:16:39 vm07 systemd[1]: ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328@osd.4.service: Deactivated successfully. 2026-03-07T10:16:39.933 INFO:journalctl@ceph.osd.4.vm07.stdout:Mar 07 10:16:39 vm07 systemd[1]: ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328@osd.4.service: Unit process 80916 (conmon) remains running after unit stopped. 2026-03-07T10:16:39.933 INFO:journalctl@ceph.osd.4.vm07.stdout:Mar 07 10:16:39 vm07 systemd[1]: ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328@osd.4.service: Unit process 80924 (podman) remains running after unit stopped. 2026-03-07T10:16:39.933 INFO:journalctl@ceph.osd.4.vm07.stdout:Mar 07 10:16:39 vm07 systemd[1]: Stopped Ceph osd.4 for 3fd6e214-1a0e-11f1-b256-99cfc35f3328. 2026-03-07T10:16:39.933 INFO:journalctl@ceph.osd.4.vm07.stdout:Mar 07 10:16:39 vm07 systemd[1]: ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328@osd.4.service: Consumed 1.182s CPU time, 91.9M memory peak. 2026-03-07T10:16:39.974 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:39 vm01 ceph-mon[49602]: osd.4 now down 2026-03-07T10:16:39.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:39 vm01 ceph-mon[49602]: Removing daemon osd.4 from vm07 -- ports [] 2026-03-07T10:16:39.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:39 vm01 ceph-mon[49602]: osdmap e38: 6 total, 5 up, 6 in 2026-03-07T10:16:40.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:39 vm04 ceph-mon[49935]: osd.4 now down 2026-03-07T10:16:40.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:39 vm04 ceph-mon[49935]: Removing daemon osd.4 from vm07 -- ports [] 2026-03-07T10:16:40.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:39 vm04 ceph-mon[49935]: osdmap e38: 6 total, 5 up, 6 in 2026-03-07T10:16:40.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:40 vm01 ceph-mon[49602]: pgmap v92: 1 pgs: 1 active+clean; 449 KiB data, 160 MiB used, 120 GiB / 120 GiB avail 2026-03-07T10:16:40.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:40 vm01 ceph-mon[49602]: Removing key for osd.4 2026-03-07T10:16:40.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:40 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth rm", "entity": "osd.4"}]: dispatch 2026-03-07T10:16:40.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:40 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd='[{"prefix": "auth rm", "entity": "osd.4"}]': finished 2026-03-07T10:16:40.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:40 vm01 ceph-mon[49602]: Successfully removed osd.4 on vm07 2026-03-07T10:16:40.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:40 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd 
purge-actual", "id": 4, "yes_i_really_mean_it": true}]: dispatch 2026-03-07T10:16:40.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:40 vm01 ceph-mon[49602]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-07T10:16:40.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:40 vm01 ceph-mon[49602]: Cluster is now healthy 2026-03-07T10:16:40.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:40 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd='[{"prefix": "osd purge-actual", "id": 4, "yes_i_really_mean_it": true}]': finished 2026-03-07T10:16:40.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:40 vm01 ceph-mon[49602]: osdmap e39: 5 total, 5 up, 5 in 2026-03-07T10:16:40.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:40 vm01 ceph-mon[49602]: Successfully purged osd.4 on vm07 2026-03-07T10:16:40.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:40 vm01 ceph-mon[49602]: Zapping devices for osd.4 on vm07 2026-03-07T10:16:40.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:40 vm01 ceph-mon[49602]: osdmap e40: 5 total, 5 up, 5 in 2026-03-07T10:16:41.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:40 vm04 ceph-mon[49935]: pgmap v92: 1 pgs: 1 active+clean; 449 KiB data, 160 MiB used, 120 GiB / 120 GiB avail 2026-03-07T10:16:41.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:40 vm04 ceph-mon[49935]: Removing key for osd.4 2026-03-07T10:16:41.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:40 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth rm", "entity": "osd.4"}]: dispatch 2026-03-07T10:16:41.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:40 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd='[{"prefix": "auth rm", "entity": "osd.4"}]': finished 2026-03-07T10:16:41.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:40 vm04 ceph-mon[49935]: Successfully removed osd.4 on vm07 2026-03-07T10:16:41.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:40 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd purge-actual", "id": 4, "yes_i_really_mean_it": true}]: dispatch 2026-03-07T10:16:41.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:40 vm04 ceph-mon[49935]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-07T10:16:41.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:40 vm04 ceph-mon[49935]: Cluster is now healthy 2026-03-07T10:16:41.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:40 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd='[{"prefix": "osd purge-actual", "id": 4, "yes_i_really_mean_it": true}]': finished 2026-03-07T10:16:41.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:40 vm04 ceph-mon[49935]: osdmap e39: 5 total, 5 up, 5 in 2026-03-07T10:16:41.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:40 vm04 ceph-mon[49935]: Successfully purged osd.4 on vm07 2026-03-07T10:16:41.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:40 vm04 ceph-mon[49935]: Zapping devices for osd.4 on vm07 2026-03-07T10:16:41.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:40 vm04 ceph-mon[49935]: osdmap e40: 5 total, 5 up, 5 in 2026-03-07T10:16:42.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:41 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd df", "format": "json"}]: dispatch 
2026-03-07T10:16:42.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:41 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:42.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:41 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:16:42.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:41 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd df", "format": "json"}]: dispatch 2026-03-07T10:16:42.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:41 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:42.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:41 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:16:43.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:42 vm04 ceph-mon[49935]: Successfully zapped devices for osd.4 on vm07 2026-03-07T10:16:43.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:42 vm04 ceph-mon[49935]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd df", "format": "json"}]: dispatch 2026-03-07T10:16:43.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:42 vm04 ceph-mon[49935]: pgmap v95: 1 pgs: 1 remapped+peering; 449 KiB data, 134 MiB used, 100 GiB / 100 GiB avail 2026-03-07T10:16:43.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:42 vm04 ceph-mon[49935]: osdmap e41: 5 total, 5 up, 5 in 2026-03-07T10:16:43.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:42 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:43.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:42 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:43.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:42 vm01 ceph-mon[49602]: Successfully zapped devices for osd.4 on vm07 2026-03-07T10:16:43.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:42 vm01 ceph-mon[49602]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd df", "format": "json"}]: dispatch 2026-03-07T10:16:43.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:42 vm01 ceph-mon[49602]: pgmap v95: 1 pgs: 1 remapped+peering; 449 KiB data, 134 MiB used, 100 GiB / 100 GiB avail 2026-03-07T10:16:43.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:42 vm01 ceph-mon[49602]: osdmap e41: 5 total, 5 up, 5 in 2026-03-07T10:16:43.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:42 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:43.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:42 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:44.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:44 vm04 ceph-mon[49935]: Detected new or changed devices on vm07 2026-03-07T10:16:44.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:44 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:44.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:44 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:44.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:44 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:16:44.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:44 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:16:44.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:44 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd df", "format": "json"}]: dispatch 2026-03-07T10:16:44.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:44 vm04 ceph-mon[49935]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd df", "format": "json"}]: dispatch 2026-03-07T10:16:44.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:44 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd df", "format": "json"}]: dispatch 2026-03-07T10:16:44.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:44 vm04 ceph-mon[49935]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd df", "format": "json"}]: dispatch 2026-03-07T10:16:44.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:44 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd df", "format": "json"}]: dispatch 2026-03-07T10:16:44.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:44 vm04 ceph-mon[49935]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd df", "format": "json"}]: dispatch 2026-03-07T10:16:44.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:44 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:44.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:44 vm04 ceph-mon[49935]: Removing daemon agent.vm07 from vm07 -- ports [] 2026-03-07T10:16:44.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:44 vm04 ceph-mon[49935]: Removing key for client.agent.vm07 2026-03-07T10:16:44.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:44 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth rm", "entity": "client.agent.vm07"}]: dispatch 2026-03-07T10:16:44.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:44 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd='[{"prefix": "auth rm", "entity": "client.agent.vm07"}]': finished 2026-03-07T10:16:44.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:44 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:44.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:44 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:44.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:44 vm04 ceph-mon[49935]: pgmap v97: 1 pgs: 1 remapped+peering; 449 KiB data, 134 MiB used, 100 GiB / 100 GiB avail 2026-03-07T10:16:44.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:44 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:16:44.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:44 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:44.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:44 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:44.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:44 vm01 ceph-mon[49602]: Detected new or changed devices on vm07 2026-03-07T10:16:44.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:44 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:44.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:44 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:44.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:44 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:16:44.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:44 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:16:44.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:44 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd df", "format": "json"}]: dispatch 2026-03-07T10:16:44.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:44 vm01 ceph-mon[49602]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd df", "format": "json"}]: dispatch 2026-03-07T10:16:44.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:44 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd df", "format": "json"}]: dispatch 2026-03-07T10:16:44.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:44 vm01 ceph-mon[49602]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd df", "format": "json"}]: dispatch 2026-03-07T10:16:44.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:44 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd df", "format": "json"}]: dispatch 2026-03-07T10:16:44.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:44 vm01 ceph-mon[49602]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd df", "format": "json"}]: dispatch 2026-03-07T10:16:44.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:44 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:44.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:44 vm01 ceph-mon[49602]: Removing daemon agent.vm07 from vm07 -- ports [] 2026-03-07T10:16:44.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:44 vm01 ceph-mon[49602]: Removing key for client.agent.vm07 2026-03-07T10:16:44.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:44 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth rm", "entity": "client.agent.vm07"}]: dispatch 2026-03-07T10:16:44.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:44 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd='[{"prefix": "auth rm", "entity": "client.agent.vm07"}]': finished 2026-03-07T10:16:44.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:44 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:44.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:44 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:44.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:44 vm01 ceph-mon[49602]: pgmap v97: 1 pgs: 1 remapped+peering; 449 KiB data, 134 MiB used, 100 GiB / 100 GiB avail 2026-03-07T10:16:44.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:44 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:16:44.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:44 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:44.725 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:44 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:46.726 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:46 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:46.726 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:46 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:46.726 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:46 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:46.726 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:46 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:46.726 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 
10:16:46 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:46.726 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:46 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:16:46.726 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:46 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:16:46.726 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:46 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd df", "format": "json"}]: dispatch 2026-03-07T10:16:46.726 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:46 vm01 ceph-mon[49602]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd df", "format": "json"}]: dispatch 2026-03-07T10:16:46.726 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:46 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd df", "format": "json"}]: dispatch 2026-03-07T10:16:46.726 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:46 vm01 ceph-mon[49602]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd df", "format": "json"}]: dispatch 2026-03-07T10:16:46.726 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:46 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd df", "format": "json"}]: dispatch 2026-03-07T10:16:46.726 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:46 vm01 ceph-mon[49602]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd df", "format": "json"}]: dispatch 2026-03-07T10:16:46.726 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:46 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:46.726 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:46 vm01 ceph-mon[49602]: Reconfiguring mon.a (monmap changed)... 
2026-03-07T10:16:46.726 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:46 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-07T10:16:46.726 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:46 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-07T10:16:46.726 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:46 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:16:46.726 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:46 vm01 ceph-mon[49602]: Reconfiguring daemon mon.a on vm01 2026-03-07T10:16:46.726 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:46 vm01 ceph-mon[49602]: pgmap v98: 1 pgs: 1 active+clean+wait; 449 KiB data, 134 MiB used, 100 GiB / 100 GiB avail; 75 KiB/s, 0 objects/s recovering 2026-03-07T10:16:46.726 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:46 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:46.726 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:46 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:46.726 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:46 vm01 ceph-mon[49602]: Reconfiguring mgr.a (monmap changed)... 2026-03-07T10:16:46.726 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:46 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.a", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-07T10:16:46.726 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:46 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-07T10:16:46.726 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:46 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:16:46.726 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:46 vm01 ceph-mon[49602]: Reconfiguring daemon mgr.a on vm01 2026-03-07T10:16:46.726 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:46 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:46.726 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:46 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:46.726 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:46 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch 2026-03-07T10:16:46.726 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:46 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:16:46.850 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:46.851 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:46.851 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:46 vm04 
ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:46.851 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:46.851 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:46.851 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:16:46.851 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:16:46.851 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd df", "format": "json"}]: dispatch 2026-03-07T10:16:46.851 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:46 vm04 ceph-mon[49935]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd df", "format": "json"}]: dispatch 2026-03-07T10:16:46.851 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd df", "format": "json"}]: dispatch 2026-03-07T10:16:46.851 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:46 vm04 ceph-mon[49935]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd df", "format": "json"}]: dispatch 2026-03-07T10:16:46.851 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd df", "format": "json"}]: dispatch 2026-03-07T10:16:46.851 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:46 vm04 ceph-mon[49935]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd df", "format": "json"}]: dispatch 2026-03-07T10:16:46.851 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:46.851 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:46 vm04 ceph-mon[49935]: Reconfiguring mon.a (monmap changed)... 
2026-03-07T10:16:46.851 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-07T10:16:46.851 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-07T10:16:46.851 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:16:46.851 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:46 vm04 ceph-mon[49935]: Reconfiguring daemon mon.a on vm01 2026-03-07T10:16:46.851 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:46 vm04 ceph-mon[49935]: pgmap v98: 1 pgs: 1 active+clean+wait; 449 KiB data, 134 MiB used, 100 GiB / 100 GiB avail; 75 KiB/s, 0 objects/s recovering 2026-03-07T10:16:46.851 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:46.851 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:46.851 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:46 vm04 ceph-mon[49935]: Reconfiguring mgr.a (monmap changed)... 2026-03-07T10:16:46.851 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.a", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-07T10:16:46.851 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-07T10:16:46.851 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:16:46.851 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:46 vm04 ceph-mon[49935]: Reconfiguring daemon mgr.a on vm01 2026-03-07T10:16:46.851 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:46.851 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:46.851 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch 2026-03-07T10:16:46.851 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:46 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:16:47.743 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:47 vm04 ceph-mon[49935]: Reconfiguring osd.0 (monmap changed)... 
2026-03-07T10:16:47.743 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:47 vm04 ceph-mon[49935]: Reconfiguring daemon osd.0 on vm01 2026-03-07T10:16:47.743 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:47 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:47.743 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:47 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:47.743 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:47 vm04 ceph-mon[49935]: Reconfiguring osd.1 (monmap changed)... 2026-03-07T10:16:47.743 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:47 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch 2026-03-07T10:16:47.743 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:47 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:16:47.743 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:47 vm04 ceph-mon[49935]: Reconfiguring daemon osd.1 on vm01 2026-03-07T10:16:47.743 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:47 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:47.743 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:47 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:47.743 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:47 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-07T10:16:47.743 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:47 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-07T10:16:47.743 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:47 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:16:47.743 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:47 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:47.743 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:47 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:47.743 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:47 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.b", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-07T10:16:47.743 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:47 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-07T10:16:47.743 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:47 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:16:47.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:47 vm01 ceph-mon[49602]: Reconfiguring osd.0 (monmap changed)... 
2026-03-07T10:16:47.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:47 vm01 ceph-mon[49602]: Reconfiguring daemon osd.0 on vm01 2026-03-07T10:16:47.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:47 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:47.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:47 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:47.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:47 vm01 ceph-mon[49602]: Reconfiguring osd.1 (monmap changed)... 2026-03-07T10:16:47.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:47 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch 2026-03-07T10:16:47.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:47 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:16:47.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:47 vm01 ceph-mon[49602]: Reconfiguring daemon osd.1 on vm01 2026-03-07T10:16:47.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:47 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:47.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:47 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:47.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:47 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-07T10:16:47.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:47 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-07T10:16:47.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:47 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:16:47.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:47 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:47.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:47 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:47.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:47 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.b", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-07T10:16:47.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:47 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-07T10:16:47.975 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:47 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:16:48.856 INFO:teuthology.orchestra.run.vm01.stderr:++ ceph orch ps --hostname vm07 2026-03-07T10:16:49.031 INFO:teuthology.orchestra.run.vm01.stderr:+ HOST_C_DAEMONS='NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-07T10:16:49.031 
INFO:teuthology.orchestra.run.vm01.stderr:osd.5 vm07 running (33s) 4s ago 33s 35.7M 1664M 19.2.3-39-g340d3c24fc6 8bccc98d839a 809d1aaf4e8c '
2026-03-07T10:16:49.031 INFO:teuthology.orchestra.run.vm01.stderr:+ '[' 'NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-07T10:16:49.031 INFO:teuthology.orchestra.run.vm01.stderr:osd.5 vm07 running (33s) 4s ago 33s 35.7M 1664M 19.2.3-39-g340d3c24fc6 8bccc98d839a 809d1aaf4e8c ' '!=' 'No daemons reported' ']'
2026-03-07T10:16:49.031 INFO:teuthology.orchestra.run.vm01.stderr:+ sleep 15
2026-03-07T10:16:49.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:48 vm04 ceph-mon[49935]: Reconfiguring mon.b (monmap changed)...
2026-03-07T10:16:49.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:48 vm04 ceph-mon[49935]: Reconfiguring daemon mon.b on vm04
2026-03-07T10:16:49.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:48 vm04 ceph-mon[49935]: Reconfiguring mgr.b (monmap changed)...
2026-03-07T10:16:49.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:48 vm04 ceph-mon[49935]: Reconfiguring daemon mgr.b on vm04
2026-03-07T10:16:49.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:48 vm04 ceph-mon[49935]: pgmap v99: 1 pgs: 1 active+clean+wait; 449 KiB data, 134 MiB used, 100 GiB / 100 GiB avail; 57 KiB/s, 0 objects/s recovering
2026-03-07T10:16:49.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:48 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a'
2026-03-07T10:16:49.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:48 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a'
2026-03-07T10:16:49.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:48 vm04 ceph-mon[49935]: Reconfiguring osd.2 (monmap changed)...
2026-03-07T10:16:49.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:48 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "osd.2"}]: dispatch
2026-03-07T10:16:49.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:48 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-07T10:16:49.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:48 vm04 ceph-mon[49935]: Reconfiguring daemon osd.2 on vm04
2026-03-07T10:16:49.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:48 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a'
2026-03-07T10:16:49.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:48 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a'
2026-03-07T10:16:49.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:48 vm04 ceph-mon[49935]: Reconfiguring osd.3 (monmap changed)...
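The xtrace above shows the workunit's drain wait: `ceph orch ps --hostname vm07` still reports osd.5, so the comparison against 'No daemons reported' fails and the script sleeps 15 s before polling again. Below is a minimal bash sketch of the same wait with an upper bound, so a stuck drain fails fast instead of looping forever; the 900 s budget, the host name, and the `--format json`/`jq` check are assumptions for illustration, not part of the test as run.

#!/usr/bin/env bash
# Sketch only: bounded wait for a cephadm host drain to finish.
# Assumes `ceph orch ps --hostname <host> --format json` prints an empty
# JSON array once nothing is scheduled on the host; host name and timeout
# are placeholders, not taken from the run logged here.
set -euo pipefail

host=vm07                      # placeholder host being drained
deadline=$((SECONDS + 900))    # illustrative 15 minute budget

while true; do
    remaining=$(ceph orch ps --hostname "$host" --format json | jq 'length')
    if [ "$remaining" -eq 0 ]; then
        echo "drain of $host complete"
        break
    fi
    if [ "$SECONDS" -ge "$deadline" ]; then
        echo "drain of $host still has $remaining daemon(s) after timeout" >&2
        exit 1
    fi
    sleep 15
done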
2026-03-07T10:16:49.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:48 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "osd.3"}]: dispatch 2026-03-07T10:16:49.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:48 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:16:49.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:48 vm04 ceph-mon[49935]: Reconfiguring daemon osd.3 on vm04 2026-03-07T10:16:49.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:48 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:49.101 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:48 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:49.101 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:48 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "osd.5"}]: dispatch 2026-03-07T10:16:49.101 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:48 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:16:49.101 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:48 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:49.101 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:48 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:49.101 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:48 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:49.101 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:48 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:16:49.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:48 vm01 ceph-mon[49602]: Reconfiguring mon.b (monmap changed)... 2026-03-07T10:16:49.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:48 vm01 ceph-mon[49602]: Reconfiguring daemon mon.b on vm04 2026-03-07T10:16:49.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:48 vm01 ceph-mon[49602]: Reconfiguring mgr.b (monmap changed)... 2026-03-07T10:16:49.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:48 vm01 ceph-mon[49602]: Reconfiguring daemon mgr.b on vm04 2026-03-07T10:16:49.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:48 vm01 ceph-mon[49602]: pgmap v99: 1 pgs: 1 active+clean+wait; 449 KiB data, 134 MiB used, 100 GiB / 100 GiB avail; 57 KiB/s, 0 objects/s recovering 2026-03-07T10:16:49.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:48 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:49.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:48 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:49.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:48 vm01 ceph-mon[49602]: Reconfiguring osd.2 (monmap changed)... 
2026-03-07T10:16:49.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:48 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "osd.2"}]: dispatch 2026-03-07T10:16:49.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:48 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:16:49.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:48 vm01 ceph-mon[49602]: Reconfiguring daemon osd.2 on vm04 2026-03-07T10:16:49.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:48 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:49.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:48 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:49.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:48 vm01 ceph-mon[49602]: Reconfiguring osd.3 (monmap changed)... 2026-03-07T10:16:49.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:48 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "osd.3"}]: dispatch 2026-03-07T10:16:49.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:48 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:16:49.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:48 vm01 ceph-mon[49602]: Reconfiguring daemon osd.3 on vm04 2026-03-07T10:16:49.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:48 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:49.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:48 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:49.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:48 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "osd.5"}]: dispatch 2026-03-07T10:16:49.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:48 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:16:49.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:48 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:49.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:48 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:49.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:48 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:49.225 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:48 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:16:50.440 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:50 vm01 ceph-mon[49602]: Reconfiguring osd.5 (monmap changed)... 
2026-03-07T10:16:50.440 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:50 vm01 ceph-mon[49602]: Reconfiguring daemon osd.5 on vm07 2026-03-07T10:16:50.440 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:50 vm01 ceph-mon[49602]: from='client.14534 -' entity='client.admin' cmd=[{"prefix": "orch ps", "hostname": "vm07", "target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:16:50.440 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:50 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:50.440 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:50 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:50.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:50 vm04 ceph-mon[49935]: Reconfiguring osd.5 (monmap changed)... 2026-03-07T10:16:50.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:50 vm04 ceph-mon[49935]: Reconfiguring daemon osd.5 on vm07 2026-03-07T10:16:50.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:50 vm04 ceph-mon[49935]: from='client.14534 -' entity='client.admin' cmd=[{"prefix": "orch ps", "hostname": "vm07", "target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:16:50.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:50 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:50.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:50 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:50.724 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:50 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mon-a[49598]: 2026-03-07T10:16:50.440+0000 7fb675298640 -1 mon.a@0(leader).osd e41 definitely_dead 0 2026-03-07T10:16:51.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:51 vm01 ceph-mon[49602]: pgmap v100: 1 pgs: 1 active+clean+wait; 449 KiB data, 134 MiB used, 100 GiB / 100 GiB avail; 49 KiB/s, 0 objects/s recovering 2026-03-07T10:16:51.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:51 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:51.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:51 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:51.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:51 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:16:51.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:51 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:16:51.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:51 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd df", "format": "json"}]: dispatch 2026-03-07T10:16:51.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:51 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd df", "format": "json"}]: dispatch 2026-03-07T10:16:51.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:51 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd df", "format": "json"}]: dispatch 2026-03-07T10:16:51.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:51 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' 
entity='mgr.a' cmd=[{"prefix": "osd safe-to-destroy", "ids": ["5"]}]: dispatch 2026-03-07T10:16:51.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:51 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd down", "ids": ["5"]}]: dispatch 2026-03-07T10:16:51.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:51 vm04 ceph-mon[49935]: pgmap v100: 1 pgs: 1 active+clean+wait; 449 KiB data, 134 MiB used, 100 GiB / 100 GiB avail; 49 KiB/s, 0 objects/s recovering 2026-03-07T10:16:51.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:51 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:51.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:51 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:51.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:51 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:16:51.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:51 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:16:51.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:51 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd df", "format": "json"}]: dispatch 2026-03-07T10:16:51.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:51 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd df", "format": "json"}]: dispatch 2026-03-07T10:16:51.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:51 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd df", "format": "json"}]: dispatch 2026-03-07T10:16:51.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:51 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd safe-to-destroy", "ids": ["5"]}]: dispatch 2026-03-07T10:16:51.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:51 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd down", "ids": ["5"]}]: dispatch 2026-03-07T10:16:51.899 INFO:journalctl@ceph.osd.5.vm07.stdout:Mar 07 10:16:51 vm07 systemd[1]: Stopping Ceph osd.5 for 3fd6e214-1a0e-11f1-b256-99cfc35f3328... 
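The journal entries around this point record the orchestrator tearing down osd.5: mgr.a asks the monitors whether the OSD is safe to destroy, marks it down, systemd stops the containerized daemon, and in the entries that follow the auth key is removed, the OSD is purged ("osd purge-actual"), and its devices are zapped. A rough bash sketch of the equivalent manual sequence is below, for orientation only; the host name and device path are placeholders, and the public `ceph osd purge` stands in for the internal "auth rm" + "osd purge-actual" pair logged here.

#!/usr/bin/env bash
# Sketch: manual approximation of what the cephadm drain does to one OSD,
# reconstructed from the journal entries around this point in the log.
# Host and device path are hypothetical, not values from this run.
set -euo pipefail

osd_id=5
host=vm07                 # placeholder
device=/dev/vdb           # hypothetical device path

# refuse to continue unless data is fully migrated off the OSD
ceph osd safe-to-destroy "$osd_id"

# mark it down and stop the containerized daemon via the orchestrator
ceph osd down "$osd_id"
ceph orch daemon stop "osd.$osd_id"

# purge removes the OSD from the osdmap and CRUSH map and drops its auth key
ceph osd purge "$osd_id" --yes-i-really-mean-it

# wipe the backing device so it can be reused
ceph orch device zap "$host" "$device" --force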
2026-03-07T10:16:51.899 INFO:journalctl@ceph.osd.5.vm07.stdout:Mar 07 10:16:51 vm07 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-osd-5[75975]: 2026-03-07T10:16:51.679+0000 7f99154b8640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.5 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-07T10:16:51.899 INFO:journalctl@ceph.osd.5.vm07.stdout:Mar 07 10:16:51 vm07 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-osd-5[75975]: 2026-03-07T10:16:51.679+0000 7f99154b8640 -1 osd.5 41 *** Got signal Terminated *** 2026-03-07T10:16:51.899 INFO:journalctl@ceph.osd.5.vm07.stdout:Mar 07 10:16:51 vm07 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-osd-5[75975]: 2026-03-07T10:16:51.679+0000 7f99154b8640 -1 osd.5 41 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-07T10:16:51.899 INFO:journalctl@ceph.osd.5.vm07.stdout:Mar 07 10:16:51 vm07 podman[88394]: 2026-03-07 10:16:51.703026439 +0000 UTC m=+0.040171008 container died 809d1aaf4e8cb8c7eb74b9293a8f49051f823e4982a47eaa597fbd213dd1dea4 (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-osd-5, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6) 2026-03-07T10:16:51.899 INFO:journalctl@ceph.osd.5.vm07.stdout:Mar 07 10:16:51 vm07 podman[88394]: 2026-03-07 10:16:51.733353792 +0000 UTC m=+0.070498361 container remove 809d1aaf4e8cb8c7eb74b9293a8f49051f823e4982a47eaa597fbd213dd1dea4 (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-osd-5, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8) 2026-03-07T10:16:51.899 INFO:journalctl@ceph.osd.5.vm07.stdout:Mar 07 10:16:51 vm07 bash[88394]: ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-osd-5 2026-03-07T10:16:52.182 INFO:journalctl@ceph.osd.5.vm07.stdout:Mar 07 10:16:51 vm07 podman[88461]: 2026-03-07 10:16:51.899131132 +0000 UTC m=+0.018252213 container create ebff5e964f855d9fb4a3df11f2aa5a2141fc38ad13e4be90c40e0f8c591eae79 (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-osd-5-deactivate, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8) 2026-03-07T10:16:52.182 INFO:journalctl@ceph.osd.5.vm07.stdout:Mar 07 10:16:51 vm07 podman[88461]: 2026-03-07 10:16:51.941391088 +0000 UTC m=+0.060512178 container init ebff5e964f855d9fb4a3df11f2aa5a2141fc38ad13e4be90c40e0f8c591eae79 
(image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-osd-5-deactivate, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git) 2026-03-07T10:16:52.182 INFO:journalctl@ceph.osd.5.vm07.stdout:Mar 07 10:16:51 vm07 podman[88461]: 2026-03-07 10:16:51.94513783 +0000 UTC m=+0.064258911 container start ebff5e964f855d9fb4a3df11f2aa5a2141fc38ad13e4be90c40e0f8c591eae79 (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-osd-5-deactivate, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True) 2026-03-07T10:16:52.182 INFO:journalctl@ceph.osd.5.vm07.stdout:Mar 07 10:16:51 vm07 podman[88461]: 2026-03-07 10:16:51.949959635 +0000 UTC m=+0.069080726 container attach ebff5e964f855d9fb4a3df11f2aa5a2141fc38ad13e4be90c40e0f8c591eae79 (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-osd-5-deactivate, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True) 2026-03-07T10:16:52.182 INFO:journalctl@ceph.osd.5.vm07.stdout:Mar 07 10:16:51 vm07 podman[88461]: 2026-03-07 10:16:51.891542939 +0000 UTC m=+0.010664030 image pull 8bccc98d839aa18345ec1336292d0452ca331737e49f12524f635044dcabcfe1 harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0 2026-03-07T10:16:52.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:52 vm01 ceph-mon[49602]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd df", "format": "json"}]: dispatch 2026-03-07T10:16:52.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:52 vm01 ceph-mon[49602]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd df", "format": "json"}]: dispatch 2026-03-07T10:16:52.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:52 vm01 ceph-mon[49602]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd df", "format": "json"}]: dispatch 2026-03-07T10:16:52.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:52 vm01 ceph-mon[49602]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd safe-to-destroy", "ids": ["5"]}]: dispatch 2026-03-07T10:16:52.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:52 vm01 ceph-mon[49602]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-07T10:16:52.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:52 vm01 ceph-mon[49602]: Health check failed: 1 host (1 osds) down (OSD_HOST_DOWN) 2026-03-07T10:16:52.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:52 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd='[{"prefix": "osd down", "ids": ["5"]}]': finished 2026-03-07T10:16:52.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:52 vm01 ceph-mon[49602]: osdmap e42: 5 total, 4 up, 5 in 2026-03-07T10:16:52.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:52 vm04 ceph-mon[49935]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd df", "format": "json"}]: dispatch 2026-03-07T10:16:52.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:52 vm04 ceph-mon[49935]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd df", "format": "json"}]: dispatch 2026-03-07T10:16:52.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:52 vm04 ceph-mon[49935]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd df", "format": "json"}]: dispatch 2026-03-07T10:16:52.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:52 vm04 ceph-mon[49935]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd safe-to-destroy", "ids": ["5"]}]: dispatch 2026-03-07T10:16:52.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:52 vm04 ceph-mon[49935]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-07T10:16:52.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:52 vm04 ceph-mon[49935]: Health check failed: 1 host (1 osds) down (OSD_HOST_DOWN) 2026-03-07T10:16:52.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:52 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd='[{"prefix": "osd down", "ids": ["5"]}]': finished 2026-03-07T10:16:52.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:52 vm04 ceph-mon[49935]: osdmap e42: 5 total, 4 up, 5 in 2026-03-07T10:16:52.683 INFO:journalctl@ceph.osd.5.vm07.stdout:Mar 07 10:16:52 vm07 podman[88481]: 2026-03-07 10:16:52.345459276 +0000 UTC m=+0.009611801 container died ebff5e964f855d9fb4a3df11f2aa5a2141fc38ad13e4be90c40e0f8c591eae79 (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-osd-5-deactivate, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8) 2026-03-07T10:16:52.683 INFO:journalctl@ceph.osd.5.vm07.stdout:Mar 07 10:16:52 vm07 podman[88481]: 2026-03-07 10:16:52.366793085 +0000 UTC m=+0.030945610 container remove ebff5e964f855d9fb4a3df11f2aa5a2141fc38ad13e4be90c40e0f8c591eae79 (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-osd-5-deactivate, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, 
GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True) 2026-03-07T10:16:52.683 INFO:journalctl@ceph.osd.5.vm07.stdout:Mar 07 10:16:52 vm07 systemd[1]: ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328@osd.5.service: Deactivated successfully. 2026-03-07T10:16:52.683 INFO:journalctl@ceph.osd.5.vm07.stdout:Mar 07 10:16:52 vm07 systemd[1]: Stopped Ceph osd.5 for 3fd6e214-1a0e-11f1-b256-99cfc35f3328. 2026-03-07T10:16:52.683 INFO:journalctl@ceph.osd.5.vm07.stdout:Mar 07 10:16:52 vm07 systemd[1]: ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328@osd.5.service: Consumed 1.107s CPU time, 56.4M memory peak. 2026-03-07T10:16:53.376 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:53 vm01 ceph-mon[49602]: osd.5 now down 2026-03-07T10:16:53.376 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:53 vm01 ceph-mon[49602]: Removing daemon osd.5 from vm07 -- ports [] 2026-03-07T10:16:53.376 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:53 vm01 ceph-mon[49602]: pgmap v102: 1 pgs: 1 active+clean+wait; 449 KiB data, 134 MiB used, 100 GiB / 100 GiB avail; 45 KiB/s, 0 objects/s recovering 2026-03-07T10:16:53.376 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:53 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth rm", "entity": "osd.5"}]: dispatch 2026-03-07T10:16:53.376 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:53 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd='[{"prefix": "auth rm", "entity": "osd.5"}]': finished 2026-03-07T10:16:53.376 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:53 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd purge-actual", "id": 5, "yes_i_really_mean_it": true}]: dispatch 2026-03-07T10:16:53.376 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:53 vm01 ceph-mon[49602]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-07T10:16:53.376 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:53 vm01 ceph-mon[49602]: Health check cleared: OSD_HOST_DOWN (was: 1 host (1 osds) down) 2026-03-07T10:16:53.376 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:53 vm01 ceph-mon[49602]: Cluster is now healthy 2026-03-07T10:16:53.376 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:53 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd='[{"prefix": "osd purge-actual", "id": 5, "yes_i_really_mean_it": true}]': finished 2026-03-07T10:16:53.376 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:53 vm01 ceph-mon[49602]: osdmap e43: 4 total, 4 up, 4 in 2026-03-07T10:16:53.502 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:53 vm04 ceph-mon[49935]: osd.5 now down 2026-03-07T10:16:53.502 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:53 vm04 ceph-mon[49935]: Removing daemon osd.5 from vm07 -- ports [] 2026-03-07T10:16:53.502 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:53 vm04 ceph-mon[49935]: pgmap v102: 1 pgs: 1 active+clean+wait; 449 KiB data, 134 MiB used, 100 GiB / 100 GiB avail; 45 KiB/s, 0 objects/s recovering 2026-03-07T10:16:53.502 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:53 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth rm", "entity": "osd.5"}]: dispatch 2026-03-07T10:16:53.502 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:53 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 
cmd='[{"prefix": "auth rm", "entity": "osd.5"}]': finished 2026-03-07T10:16:53.502 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:53 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd purge-actual", "id": 5, "yes_i_really_mean_it": true}]: dispatch 2026-03-07T10:16:53.502 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:53 vm04 ceph-mon[49935]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-07T10:16:53.502 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:53 vm04 ceph-mon[49935]: Health check cleared: OSD_HOST_DOWN (was: 1 host (1 osds) down) 2026-03-07T10:16:53.502 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:53 vm04 ceph-mon[49935]: Cluster is now healthy 2026-03-07T10:16:53.502 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:53 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd='[{"prefix": "osd purge-actual", "id": 5, "yes_i_really_mean_it": true}]': finished 2026-03-07T10:16:53.502 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:53 vm04 ceph-mon[49935]: osdmap e43: 4 total, 4 up, 4 in 2026-03-07T10:16:54.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:54 vm01 ceph-mon[49602]: Removing key for osd.5 2026-03-07T10:16:54.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:54 vm01 ceph-mon[49602]: Successfully removed osd.5 on vm07 2026-03-07T10:16:54.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:54 vm01 ceph-mon[49602]: Successfully purged osd.5 on vm07 2026-03-07T10:16:54.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:54 vm01 ceph-mon[49602]: Zapping devices for osd.5 on vm07 2026-03-07T10:16:54.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:54 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:54.601 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:54 vm04 ceph-mon[49935]: Removing key for osd.5 2026-03-07T10:16:54.601 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:54 vm04 ceph-mon[49935]: Successfully removed osd.5 on vm07 2026-03-07T10:16:54.601 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:54 vm04 ceph-mon[49935]: Successfully purged osd.5 on vm07 2026-03-07T10:16:54.601 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:54 vm04 ceph-mon[49935]: Zapping devices for osd.5 on vm07 2026-03-07T10:16:54.601 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:54 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:55.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:55 vm01 ceph-mon[49602]: pgmap v104: 1 pgs: 1 active+clean+wait; 449 KiB data, 108 MiB used, 80 GiB / 80 GiB avail 2026-03-07T10:16:55.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:55 vm01 ceph-mon[49602]: Successfully zapped devices for osd.5 on vm07 2026-03-07T10:16:55.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:55 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:55.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:55 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:16:55.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:55 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:55.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:55 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' 
entity='mgr.a' 2026-03-07T10:16:55.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:55 vm04 ceph-mon[49935]: pgmap v104: 1 pgs: 1 active+clean+wait; 449 KiB data, 108 MiB used, 80 GiB / 80 GiB avail 2026-03-07T10:16:55.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:55 vm04 ceph-mon[49935]: Successfully zapped devices for osd.5 on vm07 2026-03-07T10:16:55.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:55 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:55.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:55 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:16:55.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:55 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:55.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:55 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:57.363 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:57 vm01 ceph-mon[49602]: pgmap v105: 1 pgs: 1 active+clean; 449 KiB data, 108 MiB used, 80 GiB / 80 GiB avail 2026-03-07T10:16:57.363 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:57 vm01 ceph-mon[49602]: Detected new or changed devices on vm07 2026-03-07T10:16:57.363 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:57 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:57.363 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:57 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:57.363 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:57 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:16:57.363 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:57 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:16:57.363 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:57 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:57.363 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:57 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:16:57.363 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:57 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:57.363 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:57 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:57.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:57 vm04 ceph-mon[49935]: pgmap v105: 1 pgs: 1 active+clean; 449 KiB data, 108 MiB used, 80 GiB / 80 GiB avail 2026-03-07T10:16:57.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:57 vm04 ceph-mon[49935]: Detected new or changed devices on vm07 2026-03-07T10:16:57.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:57 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:57.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:57 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:57.600 
INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:57 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:16:57.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:57 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:16:57.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:57 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:57.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:57 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:16:57.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:57 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:57.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:57 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:59.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:59 vm04 ceph-mon[49935]: pgmap v106: 1 pgs: 1 active+clean; 449 KiB data, 108 MiB used, 80 GiB / 80 GiB avail 2026-03-07T10:16:59.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:59 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:59.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:59 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:59.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:59 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:16:59.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:59 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:16:59.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:59 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:59.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:59 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:59.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:59 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:59.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:16:59 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:59.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:59 vm01 ceph-mon[49602]: pgmap v106: 1 pgs: 1 active+clean; 449 KiB data, 108 MiB used, 80 GiB / 80 GiB avail 2026-03-07T10:16:59.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:59 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:59.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:59 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:16:59.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:59 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:16:59.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:59 vm01 
ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-07T10:16:59.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:59 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a'
2026-03-07T10:16:59.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:59 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a'
2026-03-07T10:16:59.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:59 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a'
2026-03-07T10:16:59.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:16:59 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a'
2026-03-07T10:17:01.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:17:01 vm04 ceph-mon[49935]: pgmap v107: 1 pgs: 1 active+clean; 449 KiB data, 108 MiB used, 80 GiB / 80 GiB avail
2026-03-07T10:17:01.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:17:01 vm01 ceph-mon[49602]: pgmap v107: 1 pgs: 1 active+clean; 449 KiB data, 108 MiB used, 80 GiB / 80 GiB avail
2026-03-07T10:17:03.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:17:03 vm04 ceph-mon[49935]: pgmap v108: 1 pgs: 1 active+clean; 449 KiB data, 108 MiB used, 80 GiB / 80 GiB avail
2026-03-07T10:17:03.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:17:03 vm01 ceph-mon[49602]: pgmap v108: 1 pgs: 1 active+clean; 449 KiB data, 108 MiB used, 80 GiB / 80 GiB avail
2026-03-07T10:17:04.034 INFO:teuthology.orchestra.run.vm01.stderr:++ ceph orch ps --hostname vm07
2026-03-07T10:17:04.200 INFO:teuthology.orchestra.run.vm01.stderr:+ HOST_C_DAEMONS='No daemons reported'
2026-03-07T10:17:04.200 INFO:teuthology.orchestra.run.vm01.stderr:+ '[' 'No daemons reported' '!=' 'No daemons reported' ']'
2026-03-07T10:17:04.200 INFO:teuthology.orchestra.run.vm01.stderr:+ ceph osd getcrushmap -o compiled-crushmap
2026-03-07T10:17:04.352 INFO:teuthology.orchestra.run.vm01.stderr:17
2026-03-07T10:17:04.362 INFO:teuthology.orchestra.run.vm01.stderr:+ crushtool -d compiled-crushmap -o crushmap.txt
2026-03-07T10:17:04.372 INFO:teuthology.orchestra.run.vm01.stderr:++ cat crushmap.txt
2026-03-07T10:17:04.378 INFO:teuthology.orchestra.run.vm01.stderr:+ CRUSH_MAP='# begin crush map
2026-03-07T10:17:04.378 INFO:teuthology.orchestra.run.vm01.stderr:tunable choose_local_tries 0
2026-03-07T10:17:04.378 INFO:teuthology.orchestra.run.vm01.stderr:tunable choose_local_fallback_tries 0
2026-03-07T10:17:04.378 INFO:teuthology.orchestra.run.vm01.stderr:tunable choose_total_tries 50
2026-03-07T10:17:04.378 INFO:teuthology.orchestra.run.vm01.stderr:tunable chooseleaf_descend_once 1
2026-03-07T10:17:04.379 INFO:teuthology.orchestra.run.vm01.stderr:tunable chooseleaf_vary_r 1
2026-03-07T10:17:04.379 INFO:teuthology.orchestra.run.vm01.stderr:tunable chooseleaf_stable 1
2026-03-07T10:17:04.379 INFO:teuthology.orchestra.run.vm01.stderr:tunable straw_calc_version 1
2026-03-07T10:17:04.379 INFO:teuthology.orchestra.run.vm01.stderr:tunable allowed_bucket_algs 54
2026-03-07T10:17:04.379 INFO:teuthology.orchestra.run.vm01.stderr:
2026-03-07T10:17:04.379 INFO:teuthology.orchestra.run.vm01.stderr:# devices
2026-03-07T10:17:04.379 INFO:teuthology.orchestra.run.vm01.stderr:device 0 osd.0 class hdd
2026-03-07T10:17:04.379 INFO:teuthology.orchestra.run.vm01.stderr:device 1 osd.1 class hdd
2026-03-07T10:17:04.379 INFO:teuthology.orchestra.run.vm01.stderr:device 2 osd.2 class hdd
2026-03-07T10:17:04.379 INFO:teuthology.orchestra.run.vm01.stderr:device 3 osd.3 class hdd
2026-03-07T10:17:04.379 INFO:teuthology.orchestra.run.vm01.stderr:
2026-03-07T10:17:04.379 INFO:teuthology.orchestra.run.vm01.stderr:# types
2026-03-07T10:17:04.379 INFO:teuthology.orchestra.run.vm01.stderr:type 0 osd
2026-03-07T10:17:04.379 INFO:teuthology.orchestra.run.vm01.stderr:type 1 host
2026-03-07T10:17:04.379 INFO:teuthology.orchestra.run.vm01.stderr:type 2 chassis
2026-03-07T10:17:04.379 INFO:teuthology.orchestra.run.vm01.stderr:type 3 rack
2026-03-07T10:17:04.379 INFO:teuthology.orchestra.run.vm01.stderr:type 4 row
2026-03-07T10:17:04.379 INFO:teuthology.orchestra.run.vm01.stderr:type 5 pdu
2026-03-07T10:17:04.379 INFO:teuthology.orchestra.run.vm01.stderr:type 6 pod
2026-03-07T10:17:04.379 INFO:teuthology.orchestra.run.vm01.stderr:type 7 room
2026-03-07T10:17:04.379 INFO:teuthology.orchestra.run.vm01.stderr:type 8 datacenter
2026-03-07T10:17:04.379 INFO:teuthology.orchestra.run.vm01.stderr:type 9 zone
2026-03-07T10:17:04.379 INFO:teuthology.orchestra.run.vm01.stderr:type 10 region
2026-03-07T10:17:04.379 INFO:teuthology.orchestra.run.vm01.stderr:type 11 root
2026-03-07T10:17:04.379 INFO:teuthology.orchestra.run.vm01.stderr:
2026-03-07T10:17:04.379 INFO:teuthology.orchestra.run.vm01.stderr:# buckets
2026-03-07T10:17:04.379 INFO:teuthology.orchestra.run.vm01.stderr:host vm01 {
2026-03-07T10:17:04.379 INFO:teuthology.orchestra.run.vm01.stderr: id -3 # do not change unnecessarily
2026-03-07T10:17:04.379 INFO:teuthology.orchestra.run.vm01.stderr: id -4 class hdd # do not change unnecessarily
2026-03-07T10:17:04.379 INFO:teuthology.orchestra.run.vm01.stderr: # weight 0.03897
2026-03-07T10:17:04.379 INFO:teuthology.orchestra.run.vm01.stderr: alg straw2
2026-03-07T10:17:04.379 INFO:teuthology.orchestra.run.vm01.stderr: hash 0 # rjenkins1
2026-03-07T10:17:04.379 INFO:teuthology.orchestra.run.vm01.stderr: item osd.0 weight 0.01949
2026-03-07T10:17:04.379 INFO:teuthology.orchestra.run.vm01.stderr: item osd.1 weight 0.01949
2026-03-07T10:17:04.379 INFO:teuthology.orchestra.run.vm01.stderr:}
2026-03-07T10:17:04.379 INFO:teuthology.orchestra.run.vm01.stderr:host vm04 {
2026-03-07T10:17:04.379 INFO:teuthology.orchestra.run.vm01.stderr: id -5 # do not change unnecessarily
2026-03-07T10:17:04.379 INFO:teuthology.orchestra.run.vm01.stderr: id -6 class hdd # do not change unnecessarily
2026-03-07T10:17:04.379 INFO:teuthology.orchestra.run.vm01.stderr: # weight 0.03897
2026-03-07T10:17:04.379 INFO:teuthology.orchestra.run.vm01.stderr: alg straw2
2026-03-07T10:17:04.379 INFO:teuthology.orchestra.run.vm01.stderr: hash 0 # rjenkins1
2026-03-07T10:17:04.379 INFO:teuthology.orchestra.run.vm01.stderr: item osd.2 weight 0.01949
2026-03-07T10:17:04.379 INFO:teuthology.orchestra.run.vm01.stderr: item osd.3 weight 0.01949
2026-03-07T10:17:04.379 INFO:teuthology.orchestra.run.vm01.stderr:}
2026-03-07T10:17:04.379 INFO:teuthology.orchestra.run.vm01.stderr:host vm07 {
2026-03-07T10:17:04.379 INFO:teuthology.orchestra.run.vm01.stderr: id -7 # do not change unnecessarily
2026-03-07T10:17:04.379 INFO:teuthology.orchestra.run.vm01.stderr: id -8 class hdd # do not change unnecessarily
2026-03-07T10:17:04.379 INFO:teuthology.orchestra.run.vm01.stderr: # weight 0.00000
2026-03-07T10:17:04.379 INFO:teuthology.orchestra.run.vm01.stderr: alg straw2
2026-03-07T10:17:04.379 INFO:teuthology.orchestra.run.vm01.stderr: hash 0 # rjenkins1
2026-03-07T10:17:04.379 INFO:teuthology.orchestra.run.vm01.stderr:}
2026-03-07T10:17:04.379 INFO:teuthology.orchestra.run.vm01.stderr:root default {
2026-03-07T10:17:04.379 INFO:teuthology.orchestra.run.vm01.stderr: id -1 # do not change unnecessarily
2026-03-07T10:17:04.380 INFO:teuthology.orchestra.run.vm01.stderr: id -2 class hdd # do not change unnecessarily
2026-03-07T10:17:04.380 INFO:teuthology.orchestra.run.vm01.stderr: # weight 0.07794
2026-03-07T10:17:04.380 INFO:teuthology.orchestra.run.vm01.stderr: alg straw2
2026-03-07T10:17:04.380 INFO:teuthology.orchestra.run.vm01.stderr: hash 0 # rjenkins1
2026-03-07T10:17:04.380 INFO:teuthology.orchestra.run.vm01.stderr: item vm01 weight 0.03897
2026-03-07T10:17:04.380 INFO:teuthology.orchestra.run.vm01.stderr: item vm04 weight 0.03897
2026-03-07T10:17:04.380 INFO:teuthology.orchestra.run.vm01.stderr: item vm07 weight 0.00000
2026-03-07T10:17:04.380 INFO:teuthology.orchestra.run.vm01.stderr:}
2026-03-07T10:17:04.380 INFO:teuthology.orchestra.run.vm01.stderr:
2026-03-07T10:17:04.380 INFO:teuthology.orchestra.run.vm01.stderr:# rules
2026-03-07T10:17:04.380 INFO:teuthology.orchestra.run.vm01.stderr:rule replicated_rule {
2026-03-07T10:17:04.380 INFO:teuthology.orchestra.run.vm01.stderr: id 0
2026-03-07T10:17:04.380 INFO:teuthology.orchestra.run.vm01.stderr: type replicated
2026-03-07T10:17:04.380 INFO:teuthology.orchestra.run.vm01.stderr: step take default
2026-03-07T10:17:04.380 INFO:teuthology.orchestra.run.vm01.stderr: step choose firstn 0 type osd
2026-03-07T10:17:04.380 INFO:teuthology.orchestra.run.vm01.stderr: step emit
2026-03-07T10:17:04.380 INFO:teuthology.orchestra.run.vm01.stderr:}
2026-03-07T10:17:04.380 INFO:teuthology.orchestra.run.vm01.stderr:
2026-03-07T10:17:04.380 INFO:teuthology.orchestra.run.vm01.stderr:# end crush map'
2026-03-07T10:17:04.380 INFO:teuthology.orchestra.run.vm01.stderr:+ grep -q vm07
2026-03-07T10:17:04.380 INFO:teuthology.orchestra.run.vm01.stderr:+ ceph orch host rm vm07 --rm-crush-entry
2026-03-07T10:17:05.077 INFO:teuthology.orchestra.run.vm01.stdout:Removed host 'vm07'
2026-03-07T10:17:05.089 INFO:teuthology.orchestra.run.vm01.stderr:+ sleep 30
2026-03-07T10:17:05.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:17:05 vm04 ceph-mon[49935]: pgmap v109: 1 pgs: 1 active+clean; 449 KiB data, 108 MiB used, 80 GiB / 80 GiB avail
2026-03-07T10:17:05.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:17:05 vm04 ceph-mon[49935]: from='client.? 192.168.123.101:0/3066466025' entity='client.admin' cmd=[{"prefix": "osd getcrushmap"}]: dispatch
2026-03-07T10:17:05.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:17:05 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd crush remove", "name": "vm07"}]: dispatch
2026-03-07T10:17:05.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:17:05 vm01 ceph-mon[49602]: pgmap v109: 1 pgs: 1 active+clean; 449 KiB data, 108 MiB used, 80 GiB / 80 GiB avail
2026-03-07T10:17:05.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:17:05 vm01 ceph-mon[49602]: from='client.? 
192.168.123.101:0/3066466025' entity='client.admin' cmd=[{"prefix": "osd getcrushmap"}]: dispatch 2026-03-07T10:17:05.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:17:05 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "osd crush remove", "name": "vm07"}]: dispatch 2026-03-07T10:17:06.351 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:17:06 vm04 ceph-mon[49935]: from='client.14538 -' entity='client.admin' cmd=[{"prefix": "orch ps", "hostname": "vm07", "target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:17:06.352 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:17:06 vm04 ceph-mon[49935]: from='client.14546 -' entity='client.admin' cmd=[{"prefix": "orch host rm", "hostname": "vm07", "rm_crush_entry": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:17:06.352 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:17:06 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd='[{"prefix": "osd crush remove", "name": "vm07"}]': finished 2026-03-07T10:17:06.352 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:17:06 vm04 ceph-mon[49935]: osdmap e44: 4 total, 4 up, 4 in 2026-03-07T10:17:06.352 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:17:06 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:17:06.352 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:17:06 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/host.vm07"}]: dispatch 2026-03-07T10:17:06.352 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:17:06 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd='[{"prefix":"config-key del","key":"mgr/cephadm/host.vm07"}]': finished 2026-03-07T10:17:06.352 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:17:06 vm04 ceph-mon[49935]: Removed host vm07 2026-03-07T10:17:06.352 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:17:06 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:17:06.352 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:17:06 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:17:06.352 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:17:06 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:17:06.352 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:17:06 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:17:06.352 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:17:06 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:17:06.352 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:17:06 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:17:06.352 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:17:06 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:17:06.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:17:06 vm01 ceph-mon[49602]: from='client.14538 -' entity='client.admin' cmd=[{"prefix": "orch ps", "hostname": "vm07", "target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:17:06.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:17:06 
vm01 ceph-mon[49602]: from='client.14546 -' entity='client.admin' cmd=[{"prefix": "orch host rm", "hostname": "vm07", "rm_crush_entry": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-07T10:17:06.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:17:06 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd='[{"prefix": "osd crush remove", "name": "vm07"}]': finished 2026-03-07T10:17:06.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:17:06 vm01 ceph-mon[49602]: osdmap e44: 4 total, 4 up, 4 in 2026-03-07T10:17:06.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:17:06 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:17:06.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:17:06 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/host.vm07"}]: dispatch 2026-03-07T10:17:06.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:17:06 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd='[{"prefix":"config-key del","key":"mgr/cephadm/host.vm07"}]': finished 2026-03-07T10:17:06.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:17:06 vm01 ceph-mon[49602]: Removed host vm07 2026-03-07T10:17:06.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:17:06 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-07T10:17:06.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:17:06 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-07T10:17:06.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:17:06 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-07T10:17:06.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:17:06 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:17:06.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:17:06 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:17:06.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:17:06 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:17:06.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:17:06 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:17:07.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:17:07 vm04 ceph-mon[49935]: pgmap v111: 1 pgs: 1 active+clean; 449 KiB data, 108 MiB used, 80 GiB / 80 GiB avail 2026-03-07T10:17:07.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:17:07 vm01 ceph-mon[49602]: pgmap v111: 1 pgs: 1 active+clean; 449 KiB data, 108 MiB used, 80 GiB / 80 GiB avail 2026-03-07T10:17:09.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:17:09 vm04 ceph-mon[49935]: pgmap v112: 1 pgs: 1 active+clean; 449 KiB data, 108 MiB used, 80 GiB / 80 GiB avail 2026-03-07T10:17:09.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:17:09 vm01 ceph-mon[49602]: pgmap v112: 1 pgs: 1 active+clean; 449 KiB data, 108 MiB used, 80 GiB / 80 GiB avail 2026-03-07T10:17:11.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:17:11 vm04 ceph-mon[49935]: pgmap v113: 1 pgs: 1 active+clean; 449 KiB data, 108 MiB used, 80 GiB / 80 GiB avail 
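The journal records above show "ceph orch host rm vm07 --rm-crush-entry" fanning out into an "osd crush remove vm07" and a "config-key del mgr/cephadm/host.vm07" issued by mgr.a, followed by "Removed host vm07". A quick interactive check of the same outcome might look like the sketch below; it swaps the workunit's crushtool-based check for "ceph osd tree" and "ceph orch host ls", and hard-codes the host name from this run.

    # sketch only: confirm vm07 is gone from both the orchestrator inventory and the CRUSH hierarchy
    ceph orch host ls --format json | jq -r '.[].hostname'        # vm07 should no longer be listed
    ceph osd tree | grep vm07 || echo "vm07 absent from CRUSH hierarchy"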
2026-03-07T10:17:11.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:17:11 vm01 ceph-mon[49602]: pgmap v113: 1 pgs: 1 active+clean; 449 KiB data, 108 MiB used, 80 GiB / 80 GiB avail 2026-03-07T10:17:13.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:17:13 vm04 ceph-mon[49935]: pgmap v114: 1 pgs: 1 active+clean; 449 KiB data, 108 MiB used, 80 GiB / 80 GiB avail 2026-03-07T10:17:13.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:17:13 vm01 ceph-mon[49602]: pgmap v114: 1 pgs: 1 active+clean; 449 KiB data, 108 MiB used, 80 GiB / 80 GiB avail 2026-03-07T10:17:15.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:17:15 vm04 ceph-mon[49935]: pgmap v115: 1 pgs: 1 active+clean; 449 KiB data, 108 MiB used, 80 GiB / 80 GiB avail 2026-03-07T10:17:15.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:17:15 vm01 ceph-mon[49602]: pgmap v115: 1 pgs: 1 active+clean; 449 KiB data, 108 MiB used, 80 GiB / 80 GiB avail 2026-03-07T10:17:17.350 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:17:17 vm04 ceph-mon[49935]: pgmap v116: 1 pgs: 1 active+clean; 449 KiB data, 108 MiB used, 80 GiB / 80 GiB avail 2026-03-07T10:17:17.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:17:17 vm01 ceph-mon[49602]: pgmap v116: 1 pgs: 1 active+clean; 449 KiB data, 108 MiB used, 80 GiB / 80 GiB avail 2026-03-07T10:17:19.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:17:19 vm01 ceph-mon[49602]: pgmap v117: 1 pgs: 1 active+clean; 449 KiB data, 108 MiB used, 80 GiB / 80 GiB avail 2026-03-07T10:17:19.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:17:19 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:17:19.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:17:19 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:17:19.475 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:17:19 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:17:19.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:17:19 vm04 ceph-mon[49935]: pgmap v117: 1 pgs: 1 active+clean; 449 KiB data, 108 MiB used, 80 GiB / 80 GiB avail 2026-03-07T10:17:19.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:17:19 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:17:19.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:17:19 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:17:19.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:17:19 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:17:21.474 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:17:21 vm01 ceph-mon[49602]: pgmap v118: 1 pgs: 1 active+clean; 449 KiB data, 108 MiB used, 80 GiB / 80 GiB avail 2026-03-07T10:17:21.600 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:17:21 vm04 ceph-mon[49935]: pgmap v118: 1 pgs: 1 active+clean; 449 KiB data, 108 MiB used, 80 GiB / 80 GiB avail 2026-03-07T10:17:23.402 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:17:23 vm04 ceph-mon[49935]: pgmap v119: 1 pgs: 1 active+clean; 449 KiB data, 108 MiB used, 80 GiB / 80 GiB avail 2026-03-07T10:17:23.432 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:17:23 vm01 ceph-mon[49602]: pgmap v119: 1 pgs: 1 active+clean; 449 KiB data, 108 MiB used, 80 GiB / 80 GiB avail 2026-03-07T10:17:24.724 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:17:24 vm01 ceph-mon[49602]: pgmap v120: 1 pgs: 1 active+clean; 449 KiB data, 108 MiB used, 
80 GiB / 80 GiB avail 2026-03-07T10:17:24.850 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:17:24 vm04 ceph-mon[49935]: pgmap v120: 1 pgs: 1 active+clean; 449 KiB data, 108 MiB used, 80 GiB / 80 GiB avail 2026-03-07T10:17:26.885 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:17:26 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:17:26.885 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:17:26 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:17:26.885 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:17:26 vm04 ceph-mon[49935]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:17:26.885 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:17:26 vm04 ceph-mon[49935]: pgmap v121: 1 pgs: 1 active+clean; 449 KiB data, 108 MiB used, 80 GiB / 80 GiB avail 2026-03-07T10:17:26.974 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:17:26 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:17:26.974 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:17:26 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:17:26.974 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:17:26 vm01 ceph-mon[49602]: from='mgr.14156 192.168.123.101:0/3369241016' entity='mgr.a' 2026-03-07T10:17:26.974 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:17:26 vm01 ceph-mon[49602]: pgmap v121: 1 pgs: 1 active+clean; 449 KiB data, 108 MiB used, 80 GiB / 80 GiB avail 2026-03-07T10:17:29.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:17:28 vm04 ceph-mon[49935]: pgmap v122: 1 pgs: 1 active+clean; 449 KiB data, 108 MiB used, 80 GiB / 80 GiB avail 2026-03-07T10:17:29.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:17:28 vm01 ceph-mon[49602]: pgmap v122: 1 pgs: 1 active+clean; 449 KiB data, 108 MiB used, 80 GiB / 80 GiB avail 2026-03-07T10:17:31.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:17:30 vm04 ceph-mon[49935]: pgmap v123: 1 pgs: 1 active+clean; 449 KiB data, 108 MiB used, 80 GiB / 80 GiB avail 2026-03-07T10:17:31.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:17:30 vm01 ceph-mon[49602]: pgmap v123: 1 pgs: 1 active+clean; 449 KiB data, 108 MiB used, 80 GiB / 80 GiB avail 2026-03-07T10:17:33.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:17:32 vm04 ceph-mon[49935]: pgmap v124: 1 pgs: 1 active+clean; 449 KiB data, 108 MiB used, 80 GiB / 80 GiB avail 2026-03-07T10:17:33.224 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:17:32 vm01 ceph-mon[49602]: pgmap v124: 1 pgs: 1 active+clean; 449 KiB data, 108 MiB used, 80 GiB / 80 GiB avail 2026-03-07T10:17:35.092 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:17:34 vm01 ceph-mon[49602]: pgmap v125: 1 pgs: 1 active+clean; 449 KiB data, 108 MiB used, 80 GiB / 80 GiB avail 2026-03-07T10:17:35.092 INFO:teuthology.orchestra.run.vm01.stderr:+ ceph osd getcrushmap -o compiled-crushmap 2026-03-07T10:17:35.100 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:17:34 vm04 ceph-mon[49935]: pgmap v125: 1 pgs: 1 active+clean; 449 KiB data, 108 MiB used, 80 GiB / 80 GiB avail 2026-03-07T10:17:35.244 INFO:teuthology.orchestra.run.vm01.stderr:18 2026-03-07T10:17:35.254 INFO:teuthology.orchestra.run.vm01.stderr:+ crushtool -d compiled-crushmap -o crushmap.txt 2026-03-07T10:17:35.266 INFO:teuthology.orchestra.run.vm01.stderr:++ cat crushmap.txt 2026-03-07T10:17:35.269 INFO:teuthology.orchestra.run.vm01.stderr:+ CRUSH_MAP='# begin crush map 2026-03-07T10:17:35.269 
INFO:teuthology.orchestra.run.vm01.stderr:tunable choose_local_tries 0 2026-03-07T10:17:35.269 INFO:teuthology.orchestra.run.vm01.stderr:tunable choose_local_fallback_tries 0 2026-03-07T10:17:35.269 INFO:teuthology.orchestra.run.vm01.stderr:tunable choose_total_tries 50 2026-03-07T10:17:35.269 INFO:teuthology.orchestra.run.vm01.stderr:tunable chooseleaf_descend_once 1 2026-03-07T10:17:35.269 INFO:teuthology.orchestra.run.vm01.stderr:tunable chooseleaf_vary_r 1 2026-03-07T10:17:35.269 INFO:teuthology.orchestra.run.vm01.stderr:tunable chooseleaf_stable 1 2026-03-07T10:17:35.269 INFO:teuthology.orchestra.run.vm01.stderr:tunable straw_calc_version 1 2026-03-07T10:17:35.269 INFO:teuthology.orchestra.run.vm01.stderr:tunable allowed_bucket_algs 54 2026-03-07T10:17:35.269 INFO:teuthology.orchestra.run.vm01.stderr: 2026-03-07T10:17:35.269 INFO:teuthology.orchestra.run.vm01.stderr:# devices 2026-03-07T10:17:35.269 INFO:teuthology.orchestra.run.vm01.stderr:device 0 osd.0 class hdd 2026-03-07T10:17:35.269 INFO:teuthology.orchestra.run.vm01.stderr:device 1 osd.1 class hdd 2026-03-07T10:17:35.269 INFO:teuthology.orchestra.run.vm01.stderr:device 2 osd.2 class hdd 2026-03-07T10:17:35.269 INFO:teuthology.orchestra.run.vm01.stderr:device 3 osd.3 class hdd 2026-03-07T10:17:35.269 INFO:teuthology.orchestra.run.vm01.stderr: 2026-03-07T10:17:35.269 INFO:teuthology.orchestra.run.vm01.stderr:# types 2026-03-07T10:17:35.269 INFO:teuthology.orchestra.run.vm01.stderr:type 0 osd 2026-03-07T10:17:35.269 INFO:teuthology.orchestra.run.vm01.stderr:type 1 host 2026-03-07T10:17:35.269 INFO:teuthology.orchestra.run.vm01.stderr:type 2 chassis 2026-03-07T10:17:35.269 INFO:teuthology.orchestra.run.vm01.stderr:type 3 rack 2026-03-07T10:17:35.270 INFO:teuthology.orchestra.run.vm01.stderr:type 4 row 2026-03-07T10:17:35.270 INFO:teuthology.orchestra.run.vm01.stderr:type 5 pdu 2026-03-07T10:17:35.270 INFO:teuthology.orchestra.run.vm01.stderr:type 6 pod 2026-03-07T10:17:35.270 INFO:teuthology.orchestra.run.vm01.stderr:type 7 room 2026-03-07T10:17:35.270 INFO:teuthology.orchestra.run.vm01.stderr:type 8 datacenter 2026-03-07T10:17:35.270 INFO:teuthology.orchestra.run.vm01.stderr:type 9 zone 2026-03-07T10:17:35.270 INFO:teuthology.orchestra.run.vm01.stderr:type 10 region 2026-03-07T10:17:35.270 INFO:teuthology.orchestra.run.vm01.stderr:type 11 root 2026-03-07T10:17:35.270 INFO:teuthology.orchestra.run.vm01.stderr: 2026-03-07T10:17:35.270 INFO:teuthology.orchestra.run.vm01.stderr:# buckets 2026-03-07T10:17:35.270 INFO:teuthology.orchestra.run.vm01.stderr:host vm01 { 2026-03-07T10:17:35.270 INFO:teuthology.orchestra.run.vm01.stderr: id -3 # do not change unnecessarily 2026-03-07T10:17:35.270 INFO:teuthology.orchestra.run.vm01.stderr: id -4 class hdd # do not change unnecessarily 2026-03-07T10:17:35.270 INFO:teuthology.orchestra.run.vm01.stderr: # weight 0.03897 2026-03-07T10:17:35.270 INFO:teuthology.orchestra.run.vm01.stderr: alg straw2 2026-03-07T10:17:35.270 INFO:teuthology.orchestra.run.vm01.stderr: hash 0 # rjenkins1 2026-03-07T10:17:35.270 INFO:teuthology.orchestra.run.vm01.stderr: item osd.0 weight 0.01949 2026-03-07T10:17:35.270 INFO:teuthology.orchestra.run.vm01.stderr: item osd.1 weight 0.01949 2026-03-07T10:17:35.270 INFO:teuthology.orchestra.run.vm01.stderr:} 2026-03-07T10:17:35.270 INFO:teuthology.orchestra.run.vm01.stderr:host vm04 { 2026-03-07T10:17:35.270 INFO:teuthology.orchestra.run.vm01.stderr: id -5 # do not change unnecessarily 2026-03-07T10:17:35.270 INFO:teuthology.orchestra.run.vm01.stderr: id -6 class hdd # do 
not change unnecessarily 2026-03-07T10:17:35.270 INFO:teuthology.orchestra.run.vm01.stderr: # weight 0.03897 2026-03-07T10:17:35.270 INFO:teuthology.orchestra.run.vm01.stderr: alg straw2 2026-03-07T10:17:35.270 INFO:teuthology.orchestra.run.vm01.stderr: hash 0 # rjenkins1 2026-03-07T10:17:35.270 INFO:teuthology.orchestra.run.vm01.stderr: item osd.2 weight 0.01949 2026-03-07T10:17:35.270 INFO:teuthology.orchestra.run.vm01.stderr: item osd.3 weight 0.01949 2026-03-07T10:17:35.270 INFO:teuthology.orchestra.run.vm01.stderr:} 2026-03-07T10:17:35.270 INFO:teuthology.orchestra.run.vm01.stderr:root default { 2026-03-07T10:17:35.270 INFO:teuthology.orchestra.run.vm01.stderr: id -1 # do not change unnecessarily 2026-03-07T10:17:35.270 INFO:teuthology.orchestra.run.vm01.stderr: id -2 class hdd # do not change unnecessarily 2026-03-07T10:17:35.270 INFO:teuthology.orchestra.run.vm01.stderr: # weight 0.07794 2026-03-07T10:17:35.270 INFO:teuthology.orchestra.run.vm01.stderr: alg straw2 2026-03-07T10:17:35.270 INFO:teuthology.orchestra.run.vm01.stderr: hash 0 # rjenkins1 2026-03-07T10:17:35.270 INFO:teuthology.orchestra.run.vm01.stderr: item vm01 weight 0.03897 2026-03-07T10:17:35.270 INFO:teuthology.orchestra.run.vm01.stderr: item vm04 weight 0.03897 2026-03-07T10:17:35.270 INFO:teuthology.orchestra.run.vm01.stderr:} 2026-03-07T10:17:35.270 INFO:teuthology.orchestra.run.vm01.stderr: 2026-03-07T10:17:35.270 INFO:teuthology.orchestra.run.vm01.stderr:# rules 2026-03-07T10:17:35.270 INFO:teuthology.orchestra.run.vm01.stderr:rule replicated_rule { 2026-03-07T10:17:35.270 INFO:teuthology.orchestra.run.vm01.stderr: id 0 2026-03-07T10:17:35.270 INFO:teuthology.orchestra.run.vm01.stderr: type replicated 2026-03-07T10:17:35.270 INFO:teuthology.orchestra.run.vm01.stderr: step take default 2026-03-07T10:17:35.270 INFO:teuthology.orchestra.run.vm01.stderr: step choose firstn 0 type osd 2026-03-07T10:17:35.270 INFO:teuthology.orchestra.run.vm01.stderr: step emit 2026-03-07T10:17:35.270 INFO:teuthology.orchestra.run.vm01.stderr:} 2026-03-07T10:17:35.270 INFO:teuthology.orchestra.run.vm01.stderr: 2026-03-07T10:17:35.270 INFO:teuthology.orchestra.run.vm01.stderr:# end crush map' 2026-03-07T10:17:35.270 INFO:teuthology.orchestra.run.vm01.stderr:+ grep -q vm07 2026-03-07T10:17:35.501 DEBUG:teuthology.run_tasks:Unwinding manager cephadm 2026-03-07T10:17:35.503 INFO:tasks.cephadm:Teardown begin 2026-03-07T10:17:35.503 DEBUG:teuthology.orchestra.run.vm01:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring 2026-03-07T10:17:35.531 DEBUG:teuthology.orchestra.run.vm04:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring 2026-03-07T10:17:35.562 DEBUG:teuthology.orchestra.run.vm07:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring 2026-03-07T10:17:35.590 INFO:tasks.cephadm:Cleaning up testdir ceph.* files... 2026-03-07T10:17:35.590 DEBUG:teuthology.orchestra.run.vm01:> rm -f /home/ubuntu/cephtest/seed.ceph.conf /home/ubuntu/cephtest/ceph.pub 2026-03-07T10:17:35.605 DEBUG:teuthology.orchestra.run.vm04:> rm -f /home/ubuntu/cephtest/seed.ceph.conf /home/ubuntu/cephtest/ceph.pub 2026-03-07T10:17:35.621 DEBUG:teuthology.orchestra.run.vm07:> rm -f /home/ubuntu/cephtest/seed.ceph.conf /home/ubuntu/cephtest/ceph.pub 2026-03-07T10:17:35.647 INFO:tasks.cephadm:Stopping all daemons... 2026-03-07T10:17:35.647 INFO:tasks.cephadm.mon.a:Stopping mon.a... 
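After the 30-second sleep, the second crushtool dump above no longer contains a vm07 bucket, so the trailing "grep -q vm07" matches nothing and the workunit finishes successfully; teuthology then begins unwinding the cephadm task. Condensed, that post-removal verification amounts to the following (the same commands as in the trace, except that the decompiled map is read from crushmap.txt rather than the shell variable used there):

    ceph osd getcrushmap -o compiled-crushmap
    crushtool -d compiled-crushmap -o crushmap.txt
    if grep -q vm07 crushmap.txt; then
        echo "vm07 still present in CRUSH map after removal" >&2
        exit 1
    fi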
2026-03-07T10:17:35.647 DEBUG:teuthology.orchestra.run.vm01:> sudo systemctl stop ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328@mon.a 2026-03-07T10:17:35.724 INFO:journalctl@ceph.mon.a.vm01.stdout:Mar 07 10:17:35 vm01 systemd[1]: Stopping Ceph mon.a for 3fd6e214-1a0e-11f1-b256-99cfc35f3328... 2026-03-07T10:17:35.850 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:17:35 vm04 ceph-mon[49935]: from='client.? 192.168.123.101:0/1792835712' entity='client.admin' cmd=[{"prefix": "osd getcrushmap"}]: dispatch 2026-03-07T10:17:35.943 DEBUG:teuthology.orchestra.run.vm01:> sudo pkill -f 'journalctl -f -n 0 -u ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328@mon.a.service' 2026-03-07T10:17:35.983 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-07T10:17:35.983 INFO:tasks.cephadm.mon.a:Stopped mon.a 2026-03-07T10:17:35.983 INFO:tasks.cephadm.mon.c:Stopping mon.b... 2026-03-07T10:17:35.983 DEBUG:teuthology.orchestra.run.vm04:> sudo systemctl stop ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328@mon.b 2026-03-07T10:17:36.331 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:17:36 vm04 systemd[1]: Stopping Ceph mon.b for 3fd6e214-1a0e-11f1-b256-99cfc35f3328... 2026-03-07T10:17:36.331 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:17:36 vm04 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mon-b[49931]: 2026-03-07T10:17:36.096+0000 7f8c38d2f640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-mon -n mon.b -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false (PID: 1) UID: 0 2026-03-07T10:17:36.331 INFO:journalctl@ceph.mon.b.vm04.stdout:Mar 07 10:17:36 vm04 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mon-b[49931]: 2026-03-07T10:17:36.096+0000 7f8c38d2f640 -1 mon.b@1(peon) e4 *** Got Signal Terminated *** 2026-03-07T10:17:36.416 DEBUG:teuthology.orchestra.run.vm04:> sudo pkill -f 'journalctl -f -n 0 -u ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328@mon.b.service' 2026-03-07T10:17:36.452 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-07T10:17:36.452 INFO:tasks.cephadm.mon.c:Stopped mon.b 2026-03-07T10:17:36.452 INFO:tasks.cephadm.mon.c:Stopping mon.c... 2026-03-07T10:17:36.452 DEBUG:teuthology.orchestra.run.vm07:> sudo systemctl stop ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328@mon.c 2026-03-07T10:17:36.497 DEBUG:teuthology.orchestra.run.vm07:> sudo pkill -f 'journalctl -f -n 0 -u ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328@mon.c.service' 2026-03-07T10:17:36.538 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-07T10:17:36.538 INFO:tasks.cephadm.mon.c:Stopped mon.c 2026-03-07T10:17:36.538 INFO:tasks.cephadm.mgr.a:Stopping mgr.a... 2026-03-07T10:17:36.538 DEBUG:teuthology.orchestra.run.vm01:> sudo systemctl stop ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328@mgr.a 2026-03-07T10:17:36.798 DEBUG:teuthology.orchestra.run.vm01:> sudo pkill -f 'journalctl -f -n 0 -u ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328@mgr.a.service' 2026-03-07T10:17:36.820 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:17:36 vm01 systemd[1]: Stopping Ceph mgr.a for 3fd6e214-1a0e-11f1-b256-99cfc35f3328... 
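Teardown stops each daemon through its cephadm-managed systemd unit and then kills the journalctl follower that teuthology had attached to it. The per-daemon pattern, restated from the commands above with the fsid and daemon name factored out into variables:

    FSID=3fd6e214-1a0e-11f1-b256-99cfc35f3328     # fsid of this test cluster
    DAEMON=mon.a                                  # repeated for every mon, mgr and osd in turn
    sudo systemctl stop "ceph-${FSID}@${DAEMON}"
    sudo pkill -f "journalctl -f -n 0 -u ceph-${FSID}@${DAEMON}.service"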
2026-03-07T10:17:36.820 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:17:36 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a[49807]: 2026-03-07T10:17:36.649+0000 7fe8956b3640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-mgr -n mgr.a -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-07T10:17:36.820 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:17:36 vm01 podman[68250]: 2026-03-07 10:17:36.697389663 +0000 UTC m=+0.062809275 container died 5dea0b668d181f2256a41d0d38fbc3daa08cd3ace5ab0933ec039c024a47c848 (image=harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5, name=ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8) 2026-03-07T10:17:36.820 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:17:36 vm01 podman[68250]: 2026-03-07 10:17:36.73810663 +0000 UTC m=+0.103526222 container remove 5dea0b668d181f2256a41d0d38fbc3daa08cd3ace5ab0933ec039c024a47c848 (image=harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5, name=ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a) 2026-03-07T10:17:36.820 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:17:36 vm01 bash[68250]: ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-mgr-a 2026-03-07T10:17:36.820 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:17:36 vm01 systemd[1]: ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328@mgr.a.service: Deactivated successfully. 2026-03-07T10:17:36.820 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:17:36 vm01 systemd[1]: Stopped Ceph mgr.a for 3fd6e214-1a0e-11f1-b256-99cfc35f3328. 2026-03-07T10:17:36.820 INFO:journalctl@ceph.mgr.a.vm01.stdout:Mar 07 10:17:36 vm01 systemd[1]: ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328@mgr.a.service: Consumed 42.474s CPU time. 2026-03-07T10:17:36.829 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-07T10:17:36.829 INFO:tasks.cephadm.mgr.a:Stopped mgr.a 2026-03-07T10:17:36.829 INFO:tasks.cephadm.mgr.b:Stopping mgr.b... 2026-03-07T10:17:36.829 DEBUG:teuthology.orchestra.run.vm04:> sudo systemctl stop ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328@mgr.b 2026-03-07T10:17:36.966 INFO:journalctl@ceph.mgr.b.vm04.stdout:Mar 07 10:17:36 vm04 systemd[1]: Stopping Ceph mgr.b for 3fd6e214-1a0e-11f1-b256-99cfc35f3328... 2026-03-07T10:17:37.062 DEBUG:teuthology.orchestra.run.vm04:> sudo pkill -f 'journalctl -f -n 0 -u ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328@mgr.b.service' 2026-03-07T10:17:37.094 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-07T10:17:37.095 INFO:tasks.cephadm.mgr.b:Stopped mgr.b 2026-03-07T10:17:37.095 INFO:tasks.cephadm.osd.0:Stopping osd.0... 
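The mgr.a journal above also records podman tearing down the daemon's container ("container died" / "container remove") as the unit stops. A possible spot check that no containers of this cluster remain on a host, not part of the teardown itself, just a sketch using the fsid from this run:

    sudo podman ps -a --filter "name=ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328" \
        --format "{{.Names}} {{.Status}}"     # expect no output once teardown has finished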
2026-03-07T10:17:37.095 DEBUG:teuthology.orchestra.run.vm01:> sudo systemctl stop ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328@osd.0 2026-03-07T10:17:37.474 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 07 10:17:37 vm01 systemd[1]: Stopping Ceph osd.0 for 3fd6e214-1a0e-11f1-b256-99cfc35f3328... 2026-03-07T10:17:37.474 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 07 10:17:37 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-osd-0[58428]: 2026-03-07T10:17:37.201+0000 7f7f70b47640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-07T10:17:37.474 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 07 10:17:37 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-osd-0[58428]: 2026-03-07T10:17:37.201+0000 7f7f70b47640 -1 osd.0 44 *** Got signal Terminated *** 2026-03-07T10:17:37.474 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 07 10:17:37 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-osd-0[58428]: 2026-03-07T10:17:37.201+0000 7f7f70b47640 -1 osd.0 44 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-07T10:17:42.511 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 07 10:17:42 vm01 podman[68353]: 2026-03-07 10:17:42.236257136 +0000 UTC m=+5.047953390 container died 9f424d6f69e48280509f79a03024085de4e19dcf8638449ad46b69393ff96570 (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-osd-0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9) 2026-03-07T10:17:42.511 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 07 10:17:42 vm01 podman[68353]: 2026-03-07 10:17:42.260720382 +0000 UTC m=+5.072416636 container remove 9f424d6f69e48280509f79a03024085de4e19dcf8638449ad46b69393ff96570 (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-osd-0, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default) 2026-03-07T10:17:42.512 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 07 10:17:42 vm01 bash[68353]: ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-osd-0 2026-03-07T10:17:42.512 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 07 10:17:42 vm01 podman[68767]: 2026-03-07 10:17:42.41969994 +0000 UTC m=+0.017194031 container create aacd0e16ce93eed89863393e14559d20e9506e78ace3a1437f77a325c0c25e9b (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-osd-0-deactivate, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True) 2026-03-07T10:17:42.512 
INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 07 10:17:42 vm01 podman[68767]: 2026-03-07 10:17:42.457302388 +0000 UTC m=+0.054796479 container init aacd0e16ce93eed89863393e14559d20e9506e78ace3a1437f77a325c0c25e9b (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-osd-0-deactivate, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a) 2026-03-07T10:17:42.512 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 07 10:17:42 vm01 podman[68767]: 2026-03-07 10:17:42.460369137 +0000 UTC m=+0.057863228 container start aacd0e16ce93eed89863393e14559d20e9506e78ace3a1437f77a325c0c25e9b (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-osd-0-deactivate, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git) 2026-03-07T10:17:42.512 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 07 10:17:42 vm01 podman[68767]: 2026-03-07 10:17:42.466100443 +0000 UTC m=+0.063594545 container attach aacd0e16ce93eed89863393e14559d20e9506e78ace3a1437f77a325c0c25e9b (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-osd-0-deactivate, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8) 2026-03-07T10:17:42.512 INFO:journalctl@ceph.osd.0.vm01.stdout:Mar 07 10:17:42 vm01 podman[68767]: 2026-03-07 10:17:42.412645427 +0000 UTC m=+0.010139527 image pull 8bccc98d839aa18345ec1336292d0452ca331737e49f12524f635044dcabcfe1 harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0 2026-03-07T10:17:42.950 DEBUG:teuthology.orchestra.run.vm01:> sudo pkill -f 'journalctl -f -n 0 -u ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328@osd.0.service' 2026-03-07T10:17:42.989 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-07T10:17:42.989 INFO:tasks.cephadm.osd.0:Stopped osd.0 2026-03-07T10:17:42.989 INFO:tasks.cephadm.osd.1:Stopping osd.1... 2026-03-07T10:17:42.989 DEBUG:teuthology.orchestra.run.vm01:> sudo systemctl stop ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328@osd.1 2026-03-07T10:17:43.225 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 07 10:17:43 vm01 systemd[1]: Stopping Ceph osd.1 for 3fd6e214-1a0e-11f1-b256-99cfc35f3328... 
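Each OSD takes roughly five seconds to stop (osd_fast_shutdown), after which a short-lived "...-osd-N-deactivate" container appears in the journal. To see where that container comes from, one could inspect the generated unit on the host; this is only a suggestion for digging further, not something the teardown runs:

    sudo systemctl cat "ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328@osd.0"    # prints the full unit definition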
2026-03-07T10:17:43.225 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 07 10:17:43 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-osd-1[61656]: 2026-03-07T10:17:43.147+0000 7f6037eb5640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-07T10:17:43.225 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 07 10:17:43 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-osd-1[61656]: 2026-03-07T10:17:43.147+0000 7f6037eb5640 -1 osd.1 44 *** Got signal Terminated *** 2026-03-07T10:17:43.225 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 07 10:17:43 vm01 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-osd-1[61656]: 2026-03-07T10:17:43.147+0000 7f6037eb5640 -1 osd.1 44 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-07T10:17:48.429 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 07 10:17:48 vm01 podman[68861]: 2026-03-07 10:17:48.169430567 +0000 UTC m=+5.037089588 container died 32a17d739d7f82910b3b0d360639cb17a982973413a3cb904658e8e91723271b (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-osd-1, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8) 2026-03-07T10:17:48.429 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 07 10:17:48 vm01 podman[68861]: 2026-03-07 10:17:48.200128763 +0000 UTC m=+5.067787784 container remove 32a17d739d7f82910b3b0d360639cb17a982973413a3cb904658e8e91723271b (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-osd-1, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default) 2026-03-07T10:17:48.429 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 07 10:17:48 vm01 bash[68861]: ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-osd-1 2026-03-07T10:17:48.429 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 07 10:17:48 vm01 podman[68930]: 2026-03-07 10:17:48.335574316 +0000 UTC m=+0.013794860 container create 7c62a9f95c46efed55dcb230e8738557729fde6584f67200e0e3594510003e02 (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-osd-1-deactivate, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True) 2026-03-07T10:17:48.429 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 07 10:17:48 vm01 podman[68930]: 2026-03-07 10:17:48.372538828 +0000 UTC m=+0.050759383 container init 7c62a9f95c46efed55dcb230e8738557729fde6584f67200e0e3594510003e02 
(image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-osd-1-deactivate, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git) 2026-03-07T10:17:48.429 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 07 10:17:48 vm01 podman[68930]: 2026-03-07 10:17:48.377394095 +0000 UTC m=+0.055614650 container start 7c62a9f95c46efed55dcb230e8738557729fde6584f67200e0e3594510003e02 (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-osd-1-deactivate, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9) 2026-03-07T10:17:48.429 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 07 10:17:48 vm01 podman[68930]: 2026-03-07 10:17:48.378404437 +0000 UTC m=+0.056624992 container attach 7c62a9f95c46efed55dcb230e8738557729fde6584f67200e0e3594510003e02 (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-osd-1-deactivate, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default) 2026-03-07T10:17:48.430 INFO:journalctl@ceph.osd.1.vm01.stdout:Mar 07 10:17:48 vm01 podman[68930]: 2026-03-07 10:17:48.329856994 +0000 UTC m=+0.008077559 image pull 8bccc98d839aa18345ec1336292d0452ca331737e49f12524f635044dcabcfe1 harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0 2026-03-07T10:17:48.810 DEBUG:teuthology.orchestra.run.vm01:> sudo pkill -f 'journalctl -f -n 0 -u ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328@osd.1.service' 2026-03-07T10:17:48.845 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-07T10:17:48.845 INFO:tasks.cephadm.osd.1:Stopped osd.1 2026-03-07T10:17:48.845 INFO:tasks.cephadm.osd.2:Stopping osd.2... 2026-03-07T10:17:48.845 DEBUG:teuthology.orchestra.run.vm04:> sudo systemctl stop ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328@osd.2 2026-03-07T10:17:49.351 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 07 10:17:48 vm04 systemd[1]: Stopping Ceph osd.2 for 3fd6e214-1a0e-11f1-b256-99cfc35f3328... 
2026-03-07T10:17:49.351 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 07 10:17:48 vm04 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-osd-2[54651]: 2026-03-07T10:17:48.959+0000 7fc6c9c92640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-07T10:17:49.351 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 07 10:17:48 vm04 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-osd-2[54651]: 2026-03-07T10:17:48.959+0000 7fc6c9c92640 -1 osd.2 44 *** Got signal Terminated *** 2026-03-07T10:17:49.351 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 07 10:17:48 vm04 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-osd-2[54651]: 2026-03-07T10:17:48.959+0000 7fc6c9c92640 -1 osd.2 44 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-07T10:17:54.258 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 07 10:17:53 vm04 podman[61510]: 2026-03-07 10:17:53.997149166 +0000 UTC m=+5.052640000 container died f70e0de999c15fea2f5a5b6f2fa9e213cb4284572ef02a6ce47deaa0dbb51ddc (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-osd-2, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a) 2026-03-07T10:17:54.258 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 07 10:17:54 vm04 podman[61510]: 2026-03-07 10:17:54.019353819 +0000 UTC m=+5.074844653 container remove f70e0de999c15fea2f5a5b6f2fa9e213cb4284572ef02a6ce47deaa0dbb51ddc (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-osd-2, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git) 2026-03-07T10:17:54.258 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 07 10:17:54 vm04 bash[61510]: ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-osd-2 2026-03-07T10:17:54.258 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 07 10:17:54 vm04 podman[61589]: 2026-03-07 10:17:54.164138319 +0000 UTC m=+0.016464965 container create 6abd3f567942845ffe96e482511adca7059e977f4bf5d78a411d77abb28bbbf1 (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-osd-2-deactivate, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default) 2026-03-07T10:17:54.258 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 07 10:17:54 vm04 podman[61589]: 2026-03-07 10:17:54.203205049 +0000 UTC m=+0.055531705 container init 6abd3f567942845ffe96e482511adca7059e977f4bf5d78a411d77abb28bbbf1 
(image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-osd-2-deactivate, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6) 2026-03-07T10:17:54.258 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 07 10:17:54 vm04 podman[61589]: 2026-03-07 10:17:54.207972148 +0000 UTC m=+0.060298804 container start 6abd3f567942845ffe96e482511adca7059e977f4bf5d78a411d77abb28bbbf1 (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-osd-2-deactivate, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True) 2026-03-07T10:17:54.258 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 07 10:17:54 vm04 podman[61589]: 2026-03-07 10:17:54.208878194 +0000 UTC m=+0.061204850 container attach 6abd3f567942845ffe96e482511adca7059e977f4bf5d78a411d77abb28bbbf1 (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-osd-2-deactivate, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default) 2026-03-07T10:17:54.600 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 07 10:17:54 vm04 podman[61589]: 2026-03-07 10:17:54.157851848 +0000 UTC m=+0.010178514 image pull 8bccc98d839aa18345ec1336292d0452ca331737e49f12524f635044dcabcfe1 harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0 2026-03-07T10:17:54.724 DEBUG:teuthology.orchestra.run.vm04:> sudo pkill -f 'journalctl -f -n 0 -u ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328@osd.2.service' 2026-03-07T10:17:54.758 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-07T10:17:54.758 INFO:tasks.cephadm.osd.2:Stopped osd.2 2026-03-07T10:17:54.758 INFO:tasks.cephadm.osd.3:Stopping osd.3... 2026-03-07T10:17:54.759 DEBUG:teuthology.orchestra.run.vm04:> sudo systemctl stop ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328@osd.3 2026-03-07T10:17:54.944 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 07 10:17:54 vm04 systemd[1]: Stopping Ceph osd.3 for 3fd6e214-1a0e-11f1-b256-99cfc35f3328... 
2026-03-07T10:17:55.350 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 07 10:17:54 vm04 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-osd-3[57953]: 2026-03-07T10:17:54.943+0000 7fb1be28b640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.3 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-07T10:17:55.350 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 07 10:17:54 vm04 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-osd-3[57953]: 2026-03-07T10:17:54.943+0000 7fb1be28b640 -1 osd.3 44 *** Got signal Terminated *** 2026-03-07T10:17:55.350 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 07 10:17:54 vm04 ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-osd-3[57953]: 2026-03-07T10:17:54.943+0000 7fb1be28b640 -1 osd.3 44 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-07T10:18:00.236 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 07 10:17:59 vm04 podman[61684]: 2026-03-07 10:17:59.969233811 +0000 UTC m=+5.074922249 container died 6ceceeb771a5a341c44fa478792dd23f6f759509547bc12ca7716761bb3604c7 (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-osd-3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9) 2026-03-07T10:18:00.237 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 07 10:17:59 vm04 podman[61684]: 2026-03-07 10:17:59.998971077 +0000 UTC m=+5.104659505 container remove 6ceceeb771a5a341c44fa478792dd23f6f759509547bc12ca7716761bb3604c7 (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-osd-3, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a) 2026-03-07T10:18:00.237 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 07 10:18:00 vm04 bash[61684]: ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-osd-3 2026-03-07T10:18:00.237 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 07 10:18:00 vm04 podman[61751]: 2026-03-07 10:18:00.144484161 +0000 UTC m=+0.017347889 container create 8df8b47c7b4b562735b9159eb5f9cf538566cd7aea3bbaf9d4a125a2aa161616 (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-osd-3-deactivate, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6) 2026-03-07T10:18:00.237 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 07 10:18:00 vm04 podman[61751]: 2026-03-07 10:18:00.182666236 +0000 UTC m=+0.055529974 container init 8df8b47c7b4b562735b9159eb5f9cf538566cd7aea3bbaf9d4a125a2aa161616 
(image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-osd-3-deactivate, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default) 2026-03-07T10:18:00.237 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 07 10:18:00 vm04 podman[61751]: 2026-03-07 10:18:00.187464834 +0000 UTC m=+0.060328552 container start 8df8b47c7b4b562735b9159eb5f9cf538566cd7aea3bbaf9d4a125a2aa161616 (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-osd-3-deactivate, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True) 2026-03-07T10:18:00.237 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 07 10:18:00 vm04 podman[61751]: 2026-03-07 10:18:00.188647676 +0000 UTC m=+0.061511405 container attach 8df8b47c7b4b562735b9159eb5f9cf538566cd7aea3bbaf9d4a125a2aa161616 (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-osd-3-deactivate, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6) 2026-03-07T10:18:00.237 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 07 10:18:00 vm04 podman[61751]: 2026-03-07 10:18:00.13772804 +0000 UTC m=+0.010591777 image pull 8bccc98d839aa18345ec1336292d0452ca331737e49f12524f635044dcabcfe1 harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0 2026-03-07T10:18:00.592 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 07 10:18:00 vm04 podman[61751]: 2026-03-07 10:18:00.592315505 +0000 UTC m=+0.465179234 container died 8df8b47c7b4b562735b9159eb5f9cf538566cd7aea3bbaf9d4a125a2aa161616 (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328-osd-3-deactivate, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default) 2026-03-07T10:18:00.625 DEBUG:teuthology.orchestra.run.vm04:> sudo pkill -f 'journalctl -f -n 0 -u ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328@osd.3.service' 2026-03-07T10:18:00.660 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-07T10:18:00.660 INFO:tasks.cephadm.osd.3:Stopped osd.3 2026-03-07T10:18:00.660 INFO:tasks.cephadm.osd.4:Stopping osd.4... 
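osd.4 and osd.5 lived on vm07 and were removed (their devices zapped) when the host was drained earlier in the run, so the stops that follow return almost immediately with no journal output. With all daemons down, the teardown wipes the cluster state from every host; the invocation, as it appears in the records below, is:

    sudo /home/ubuntu/cephtest/cephadm rm-cluster \
        --fsid 3fd6e214-1a0e-11f1-b256-99cfc35f3328 --force --keep-logs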
2026-03-07T10:18:00.660 DEBUG:teuthology.orchestra.run.vm07:> sudo systemctl stop ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328@osd.4 2026-03-07T10:18:00.698 DEBUG:teuthology.orchestra.run.vm07:> sudo pkill -f 'journalctl -f -n 0 -u ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328@osd.4.service' 2026-03-07T10:18:00.773 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-07T10:18:00.773 INFO:tasks.cephadm.osd.4:Stopped osd.4 2026-03-07T10:18:00.774 INFO:tasks.cephadm.osd.5:Stopping osd.5... 2026-03-07T10:18:00.774 DEBUG:teuthology.orchestra.run.vm07:> sudo systemctl stop ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328@osd.5 2026-03-07T10:18:00.847 DEBUG:teuthology.orchestra.run.vm07:> sudo pkill -f 'journalctl -f -n 0 -u ceph-3fd6e214-1a0e-11f1-b256-99cfc35f3328@osd.5.service' 2026-03-07T10:18:00.921 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-07T10:18:00.921 INFO:tasks.cephadm.osd.5:Stopped osd.5 2026-03-07T10:18:00.921 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid 3fd6e214-1a0e-11f1-b256-99cfc35f3328 --force --keep-logs 2026-03-07T10:18:01.901 INFO:teuthology.orchestra.run.vm01.stdout:Deleting cluster with fsid: 3fd6e214-1a0e-11f1-b256-99cfc35f3328 2026-03-07T10:18:03.242 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid 3fd6e214-1a0e-11f1-b256-99cfc35f3328 --force --keep-logs 2026-03-07T10:18:03.374 INFO:teuthology.orchestra.run.vm04.stdout:Deleting cluster with fsid: 3fd6e214-1a0e-11f1-b256-99cfc35f3328 2026-03-07T10:18:04.791 DEBUG:teuthology.orchestra.run.vm07:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid 3fd6e214-1a0e-11f1-b256-99cfc35f3328 --force --keep-logs 2026-03-07T10:18:04.920 INFO:teuthology.orchestra.run.vm07.stdout:Deleting cluster with fsid: 3fd6e214-1a0e-11f1-b256-99cfc35f3328 2026-03-07T10:18:05.320 DEBUG:teuthology.orchestra.run.vm01:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring 2026-03-07T10:18:05.347 DEBUG:teuthology.orchestra.run.vm04:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring 2026-03-07T10:18:05.376 DEBUG:teuthology.orchestra.run.vm07:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring 2026-03-07T10:18:05.412 INFO:tasks.cephadm:Archiving crash dumps... 2026-03-07T10:18:05.412 DEBUG:teuthology.misc:Transferring archived files from vm01:/var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/crash to /archive/irq0-2026-03-07_10:02:54-orch:cephadm:workunits-cobaltcore-storage-v19.2.3-fasttrack-5-none-default-vps/10/remote/vm01/crash 2026-03-07T10:18:05.412 DEBUG:teuthology.orchestra.run.vm01:> sudo tar c -f - -C /var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/crash -- . 2026-03-07T10:18:05.440 INFO:teuthology.orchestra.run.vm01.stderr:tar: /var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/crash: Cannot open: No such file or directory 2026-03-07T10:18:05.440 INFO:teuthology.orchestra.run.vm01.stderr:tar: Error is not recoverable: exiting now 2026-03-07T10:18:05.441 DEBUG:teuthology.misc:Transferring archived files from vm04:/var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/crash to /archive/irq0-2026-03-07_10:02:54-orch:cephadm:workunits-cobaltcore-storage-v19.2.3-fasttrack-5-none-default-vps/10/remote/vm04/crash 2026-03-07T10:18:05.441 DEBUG:teuthology.orchestra.run.vm04:> sudo tar c -f - -C /var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/crash -- . 
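The crash-dump archive step tars /var/lib/ceph/<fsid>/crash on each host; since no daemon crashed in this run the directory does not exist, tar exits with "Cannot open: No such file or directory", and the run simply continues past it. A hypothetical guard that would skip the transfer when there is nothing to archive could look like this (illustration only, not how the teuthology helper is written):

    CRASH_DIR=/var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/crash
    if sudo test -d "$CRASH_DIR"; then
        sudo tar c -f - -C "$CRASH_DIR" -- . > crash.tar    # same tar invocation as above
    fi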
2026-03-07T10:18:05.467 INFO:teuthology.orchestra.run.vm04.stderr:tar: /var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/crash: Cannot open: No such file or directory 2026-03-07T10:18:05.467 INFO:teuthology.orchestra.run.vm04.stderr:tar: Error is not recoverable: exiting now 2026-03-07T10:18:05.468 DEBUG:teuthology.misc:Transferring archived files from vm07:/var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/crash to /archive/irq0-2026-03-07_10:02:54-orch:cephadm:workunits-cobaltcore-storage-v19.2.3-fasttrack-5-none-default-vps/10/remote/vm07/crash 2026-03-07T10:18:05.468 DEBUG:teuthology.orchestra.run.vm07:> sudo tar c -f - -C /var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/crash -- . 2026-03-07T10:18:05.507 INFO:teuthology.orchestra.run.vm07.stderr:tar: /var/lib/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/crash: Cannot open: No such file or directory 2026-03-07T10:18:05.507 INFO:teuthology.orchestra.run.vm07.stderr:tar: Error is not recoverable: exiting now 2026-03-07T10:18:05.508 INFO:tasks.cephadm:Checking cluster log for badness... 2026-03-07T10:18:05.508 DEBUG:teuthology.orchestra.run.vm01:> sudo egrep '\[ERR\]|\[WRN\]|\[SEC\]' /var/log/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/ceph.log | egrep CEPHADM_ | egrep -v '\(MDS_ALL_DOWN\)' | egrep -v '\(MDS_UP_LESS_THAN_MAX\)' | egrep -v MON_DOWN | egrep -v 'mons down' | egrep -v 'mon down' | egrep -v 'out of quorum' | egrep -v CEPHADM_STRAY_HOST | egrep -v CEPHADM_STRAY_DAEMON | egrep -v CEPHADM_FAILED_DAEMON | head -n 1 2026-03-07T10:18:05.543 INFO:tasks.cephadm:Compressing logs... 2026-03-07T10:18:05.543 DEBUG:teuthology.orchestra.run.vm01:> time sudo find /var/log/ceph /var/log/rbd-target-api -name '*.log' -print0 | sudo xargs --max-args=1 --max-procs=0 --verbose -0 --no-run-if-empty -- gzip -5 --verbose -- 2026-03-07T10:18:05.584 DEBUG:teuthology.orchestra.run.vm04:> time sudo find /var/log/ceph /var/log/rbd-target-api -name '*.log' -print0 | sudo xargs --max-args=1 --max-procs=0 --verbose -0 --no-run-if-empty -- gzip -5 --verbose -- 2026-03-07T10:18:05.585 DEBUG:teuthology.orchestra.run.vm07:> time sudo find /var/log/ceph /var/log/rbd-target-api -name '*.log' -print0 | sudo xargs --max-args=1 --max-procs=0 --verbose -0 --no-run-if-empty -- gzip -5 --verbose -- 2026-03-07T10:18:05.610 INFO:teuthology.orchestra.run.vm01.stderr:find: gzip -5 --verbose -- /var/log/ceph/cephadm.log 2026-03-07T10:18:05.610 INFO:teuthology.orchestra.run.vm01.stderr:‘/var/log/rbd-target-api’: No such file or directory 2026-03-07T10:18:05.610 INFO:teuthology.orchestra.run.vm01.stderr:gzip -5 --verbose -- /var/log/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/ceph-mon.a.log 2026-03-07T10:18:05.612 INFO:teuthology.orchestra.run.vm01.stderr:/var/log/ceph/cephadm.log: /var/log/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/ceph-mon.a.log: 86.4% -- replaced with /var/log/ceph/cephadm.log.gz 2026-03-07T10:18:05.612 INFO:teuthology.orchestra.run.vm01.stderr:gzip -5 --verbose -- /var/log/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/ceph.log 2026-03-07T10:18:05.617 INFO:teuthology.orchestra.run.vm01.stderr:gzip -5 --verbose -- /var/log/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/ceph-mgr.a.log 2026-03-07T10:18:05.618 INFO:teuthology.orchestra.run.vm01.stderr:/var/log/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/ceph.log: 84.9% -- replaced with /var/log/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/ceph.log.gz 2026-03-07T10:18:05.618 INFO:teuthology.orchestra.run.vm01.stderr:gzip -5 --verbose -- /var/log/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/ceph.audit.log 2026-03-07T10:18:05.624 
INFO:teuthology.orchestra.run.vm01.stderr:/var/log/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/ceph-mgr.a.log: gzip -5 --verbose -- /var/log/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/ceph.cephadm.log 2026-03-07T10:18:05.626 INFO:teuthology.orchestra.run.vm01.stderr:/var/log/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/ceph.audit.log: 90.7% -- replaced with /var/log/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/ceph.audit.log.gz 2026-03-07T10:18:05.626 INFO:teuthology.orchestra.run.vm04.stderr:gzip -5 --verbose -- /var/log/ceph/cephadm.log 2026-03-07T10:18:05.627 INFO:teuthology.orchestra.run.vm04.stderr:find: ‘/var/log/rbd-target-api’: No such file or directory 2026-03-07T10:18:05.628 INFO:teuthology.orchestra.run.vm04.stderr:gzip -5 --verbose -- /var/log/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/ceph-volume.log 2026-03-07T10:18:05.629 INFO:teuthology.orchestra.run.vm04.stderr:/var/log/ceph/cephadm.log: 83.3% -- replaced with /var/log/ceph/cephadm.log.gz 2026-03-07T10:18:05.630 INFO:teuthology.orchestra.run.vm04.stderr:gzip -5 --verbose -- /var/log/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/ceph-mon.b.log 2026-03-07T10:18:05.630 INFO:teuthology.orchestra.run.vm01.stderr:gzip -5 --verbose -- /var/log/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/ceph-volume.log 2026-03-07T10:18:05.631 INFO:teuthology.orchestra.run.vm01.stderr:/var/log/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/ceph.cephadm.log: 82.8% -- replaced with /var/log/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/ceph.cephadm.log.gz 2026-03-07T10:18:05.635 INFO:teuthology.orchestra.run.vm01.stderr:gzip -5 --verbose -- /var/log/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/ceph-osd.0.log 2026-03-07T10:18:05.635 INFO:teuthology.orchestra.run.vm04.stderr:/var/log/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/ceph-volume.log: gzip -5 --verbose -- /var/log/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/ceph.audit.log 2026-03-07T10:18:05.636 INFO:teuthology.orchestra.run.vm07.stderr:gzip -5 --verbose -- /var/log/ceph/cephadm.log 2026-03-07T10:18:05.637 INFO:teuthology.orchestra.run.vm07.stderr:find: ‘/var/log/rbd-target-api’: No such file or directory 2026-03-07T10:18:05.638 INFO:teuthology.orchestra.run.vm07.stderr:gzip -5 --verbose -- /var/log/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/ceph-volume.log 2026-03-07T10:18:05.639 INFO:teuthology.orchestra.run.vm07.stderr:/var/log/ceph/cephadm.log: gzip -5 --verbose -- /var/log/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/ceph-mon.c.log 2026-03-07T10:18:05.639 INFO:teuthology.orchestra.run.vm07.stderr: 84.0% -- replaced with /var/log/ceph/cephadm.log.gz 2026-03-07T10:18:05.640 INFO:teuthology.orchestra.run.vm07.stderr:/var/log/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/ceph-volume.log: gzip -5 --verbose -- /var/log/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/ceph.audit.log 2026-03-07T10:18:05.640 INFO:teuthology.orchestra.run.vm04.stderr:/var/log/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/ceph-mon.b.log: gzip -5 --verbose -- /var/log/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/ceph.log 2026-03-07T10:18:05.641 INFO:teuthology.orchestra.run.vm04.stderr:/var/log/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/ceph.audit.log: 90.9% -- replaced with /var/log/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/ceph.audit.log.gz 2026-03-07T10:18:05.644 INFO:teuthology.orchestra.run.vm01.stderr:/var/log/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/ceph-volume.log: gzip -5 --verbose -- /var/log/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/ceph-osd.1.log 2026-03-07T10:18:05.649 
INFO:teuthology.orchestra.run.vm07.stderr:/var/log/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/ceph-mon.c.log: gzip -5 --verbose -- /var/log/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/ceph.log 2026-03-07T10:18:05.649 INFO:teuthology.orchestra.run.vm04.stderr: 92.8% -- replaced with /var/log/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/ceph-volume.log.gz 2026-03-07T10:18:05.650 INFO:teuthology.orchestra.run.vm07.stderr:/var/log/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/ceph.audit.log: 90.6% -- replaced with /var/log/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/ceph.audit.log.gz 2026-03-07T10:18:05.650 INFO:teuthology.orchestra.run.vm07.stderr:gzip -5 --verbose -- /var/log/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/ceph.cephadm.log 2026-03-07T10:18:05.651 INFO:teuthology.orchestra.run.vm07.stderr:/var/log/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/ceph.log: 83.1% -- replaced with /var/log/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/ceph.log.gz 2026-03-07T10:18:05.651 INFO:teuthology.orchestra.run.vm04.stderr:gzip -5 --verbose -- /var/log/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/ceph.cephadm.log 2026-03-07T10:18:05.652 INFO:teuthology.orchestra.run.vm04.stderr:/var/log/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/ceph.log: 84.8% -- replaced with /var/log/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/ceph.log.gz 2026-03-07T10:18:05.654 INFO:teuthology.orchestra.run.vm04.stderr:gzip -5 --verbose -- /var/log/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/ceph-mgr.b.log 2026-03-07T10:18:05.654 INFO:teuthology.orchestra.run.vm04.stderr:/var/log/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/ceph.cephadm.log: 82.9% -- replaced with /var/log/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/ceph.cephadm.log.gz 2026-03-07T10:18:05.655 INFO:teuthology.orchestra.run.vm04.stderr:gzip -5 --verbose -- /var/log/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/ceph-osd.2.log 2026-03-07T10:18:05.655 INFO:teuthology.orchestra.run.vm07.stderr:gzip -5 --verbose -- /var/log/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/ceph-osd.4.log 2026-03-07T10:18:05.655 INFO:teuthology.orchestra.run.vm07.stderr:/var/log/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/ceph.cephadm.log: 78.7% -- replaced with /var/log/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/ceph.cephadm.log.gz 2026-03-07T10:18:05.657 INFO:teuthology.orchestra.run.vm04.stderr:/var/log/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/ceph-mgr.b.log: 90.5% -- replaced with /var/log/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/ceph-mgr.b.log.gz 2026-03-07T10:18:05.657 INFO:teuthology.orchestra.run.vm04.stderr:gzip -5 --verbose -- /var/log/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/ceph-osd.3.log 2026-03-07T10:18:05.657 INFO:teuthology.orchestra.run.vm07.stderr:gzip -5 --verbose -- /var/log/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/ceph-osd.5.log 2026-03-07T10:18:05.665 INFO:teuthology.orchestra.run.vm01.stderr:/var/log/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/ceph-osd.0.log: /var/log/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/ceph-osd.1.log: 92.7% -- replaced with /var/log/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/ceph-volume.log.gz 2026-03-07T10:18:05.682 INFO:teuthology.orchestra.run.vm04.stderr:/var/log/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/ceph-osd.2.log: /var/log/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/ceph-osd.3.log: 92.9% -- replaced with /var/log/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/ceph-osd.2.log.gz 2026-03-07T10:18:05.683 INFO:teuthology.orchestra.run.vm07.stderr:/var/log/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/ceph-osd.4.log: /var/log/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/ceph-osd.5.log: 92.3% -- 
replaced with /var/log/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/ceph-mon.c.log.gz 2026-03-07T10:18:05.683 INFO:teuthology.orchestra.run.vm01.stderr: 93.0% -- replaced with /var/log/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/ceph-osd.1.log.gz 2026-03-07T10:18:05.684 INFO:teuthology.orchestra.run.vm01.stderr: 88.7% -- replaced with /var/log/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/ceph-mgr.a.log.gz 2026-03-07T10:18:05.684 INFO:teuthology.orchestra.run.vm07.stderr: 93.0% -- replaced with /var/log/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/ceph-osd.4.log.gz 2026-03-07T10:18:05.687 INFO:teuthology.orchestra.run.vm01.stderr: 92.7% -- replaced with /var/log/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/ceph-osd.0.log.gz 2026-03-07T10:18:05.687 INFO:teuthology.orchestra.run.vm07.stderr: 92.8% -- replaced with /var/log/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/ceph-osd.5.log.gz 2026-03-07T10:18:05.689 INFO:teuthology.orchestra.run.vm07.stderr: 92.9% -- replaced with /var/log/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/ceph-volume.log.gz 2026-03-07T10:18:05.690 INFO:teuthology.orchestra.run.vm07.stderr: 2026-03-07T10:18:05.690 INFO:teuthology.orchestra.run.vm07.stderr:real 0m0.075s 2026-03-07T10:18:05.690 INFO:teuthology.orchestra.run.vm07.stderr:user 0m0.067s 2026-03-07T10:18:05.690 INFO:teuthology.orchestra.run.vm07.stderr:sys 0m0.014s 2026-03-07T10:18:05.693 INFO:teuthology.orchestra.run.vm04.stderr: 92.3% -- replaced with /var/log/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/ceph-mon.b.log.gz 2026-03-07T10:18:05.697 INFO:teuthology.orchestra.run.vm04.stderr: 93.1% -- replaced with /var/log/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/ceph-osd.3.log.gz 2026-03-07T10:18:05.698 INFO:teuthology.orchestra.run.vm04.stderr: 2026-03-07T10:18:05.698 INFO:teuthology.orchestra.run.vm04.stderr:real 0m0.093s 2026-03-07T10:18:05.698 INFO:teuthology.orchestra.run.vm04.stderr:user 0m0.101s 2026-03-07T10:18:05.698 INFO:teuthology.orchestra.run.vm04.stderr:sys 0m0.027s 2026-03-07T10:18:05.765 INFO:teuthology.orchestra.run.vm01.stderr: 91.6% -- replaced with /var/log/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/ceph-mon.a.log.gz 2026-03-07T10:18:05.767 INFO:teuthology.orchestra.run.vm01.stderr: 2026-03-07T10:18:05.767 INFO:teuthology.orchestra.run.vm01.stderr:real 0m0.168s 2026-03-07T10:18:05.767 INFO:teuthology.orchestra.run.vm01.stderr:user 0m0.223s 2026-03-07T10:18:05.767 INFO:teuthology.orchestra.run.vm01.stderr:sys 0m0.021s 2026-03-07T10:18:05.767 INFO:tasks.cephadm:Archiving logs... 2026-03-07T10:18:05.768 DEBUG:teuthology.misc:Transferring archived files from vm01:/var/log/ceph to /archive/irq0-2026-03-07_10:02:54-orch:cephadm:workunits-cobaltcore-storage-v19.2.3-fasttrack-5-none-default-vps/10/remote/vm01/log 2026-03-07T10:18:05.768 DEBUG:teuthology.orchestra.run.vm01:> sudo tar c -f - -C /var/log/ceph -- . 2026-03-07T10:18:05.846 DEBUG:teuthology.misc:Transferring archived files from vm04:/var/log/ceph to /archive/irq0-2026-03-07_10:02:54-orch:cephadm:workunits-cobaltcore-storage-v19.2.3-fasttrack-5-none-default-vps/10/remote/vm04/log 2026-03-07T10:18:05.846 DEBUG:teuthology.orchestra.run.vm04:> sudo tar c -f - -C /var/log/ceph -- . 2026-03-07T10:18:05.884 DEBUG:teuthology.misc:Transferring archived files from vm07:/var/log/ceph to /archive/irq0-2026-03-07_10:02:54-orch:cephadm:workunits-cobaltcore-storage-v19.2.3-fasttrack-5-none-default-vps/10/remote/vm07/log 2026-03-07T10:18:05.884 DEBUG:teuthology.orchestra.run.vm07:> sudo tar c -f - -C /var/log/ceph -- . 
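The "Checking cluster log for badness" step earlier in this block is a plain grep pipeline over the cluster log: keep the [ERR]/[WRN]/[SEC] lines, keep only the CEPHADM_ matches, drop the patterns on the ignore list (stray host/daemon, failed daemon, mon down, and so on), and report the first survivor. Reduced to its shape, with only a subset of the real exclusions kept and the exit handling added purely as an illustration of how one might consume the result:

    LOG=/var/log/ceph/3fd6e214-1a0e-11f1-b256-99cfc35f3328/ceph.log
    bad=$(sudo egrep '\[ERR\]|\[WRN\]|\[SEC\]' "$LOG" \
            | egrep 'CEPHADM_' \
            | egrep -v 'CEPHADM_STRAY_HOST|CEPHADM_STRAY_DAEMON|CEPHADM_FAILED_DAEMON' \
            | head -n 1)
    [ -z "$bad" ] || { echo "unexpected cluster log entry: $bad"; exit 1; }

The interleaved gzip chatter above comes from the compression command run on each host: find pipes every *.log to xargs with --max-procs=0, so one gzip runs per file with unbounded parallelism, and the per-file "-- replaced with" messages from concurrent gzips land on stderr out of order. The same invocation with the parallelism removed would give a deterministic transcript; nothing else needs to change:

    time sudo find /var/log/ceph /var/log/rbd-target-api -name '*.log' -print0 \
      | sudo xargs --max-args=1 --max-procs=1 --verbose -0 --no-run-if-empty -- gzip -5 --verbose --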
2026-03-07T10:18:05.912 INFO:tasks.cephadm:Removing cluster... 2026-03-07T10:18:05.912 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid 3fd6e214-1a0e-11f1-b256-99cfc35f3328 --force 2026-03-07T10:18:06.045 INFO:teuthology.orchestra.run.vm01.stdout:Deleting cluster with fsid: 3fd6e214-1a0e-11f1-b256-99cfc35f3328 2026-03-07T10:18:06.145 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid 3fd6e214-1a0e-11f1-b256-99cfc35f3328 --force 2026-03-07T10:18:06.276 INFO:teuthology.orchestra.run.vm04.stdout:Deleting cluster with fsid: 3fd6e214-1a0e-11f1-b256-99cfc35f3328 2026-03-07T10:18:06.374 DEBUG:teuthology.orchestra.run.vm07:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid 3fd6e214-1a0e-11f1-b256-99cfc35f3328 --force 2026-03-07T10:18:06.497 INFO:teuthology.orchestra.run.vm07.stdout:Deleting cluster with fsid: 3fd6e214-1a0e-11f1-b256-99cfc35f3328 2026-03-07T10:18:06.589 INFO:tasks.cephadm:Removing cephadm ... 2026-03-07T10:18:06.589 DEBUG:teuthology.orchestra.run.vm01:> rm -rf /home/ubuntu/cephtest/cephadm 2026-03-07T10:18:06.603 DEBUG:teuthology.orchestra.run.vm04:> rm -rf /home/ubuntu/cephtest/cephadm 2026-03-07T10:18:06.620 DEBUG:teuthology.orchestra.run.vm07:> rm -rf /home/ubuntu/cephtest/cephadm 2026-03-07T10:18:06.634 INFO:tasks.cephadm:Teardown complete 2026-03-07T10:18:06.634 DEBUG:teuthology.run_tasks:Unwinding manager clock 2026-03-07T10:18:06.636 INFO:teuthology.task.clock:Checking final clock skew... 2026-03-07T10:18:06.636 DEBUG:teuthology.orchestra.run.vm01:> PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true 2026-03-07T10:18:06.645 DEBUG:teuthology.orchestra.run.vm04:> PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true 2026-03-07T10:18:06.659 INFO:teuthology.orchestra.run.vm01.stderr:bash: line 1: ntpq: command not found 2026-03-07T10:18:06.662 DEBUG:teuthology.orchestra.run.vm07:> PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true 2026-03-07T10:18:06.663 INFO:teuthology.orchestra.run.vm01.stdout:MS Name/IP address Stratum Poll Reach LastRx Last sample 2026-03-07T10:18:06.663 INFO:teuthology.orchestra.run.vm01.stdout:=============================================================================== 2026-03-07T10:18:06.663 INFO:teuthology.orchestra.run.vm01.stdout:^- alpha.rueckgr.at 2 6 377 31 +175us[ +175us] +/- 63ms 2026-03-07T10:18:06.663 INFO:teuthology.orchestra.run.vm01.stdout:^* ns1.blazing.de 3 6 377 31 +89us[ +115us] +/- 17ms 2026-03-07T10:18:06.663 INFO:teuthology.orchestra.run.vm01.stdout:^+ server1b.meinberg.de 2 6 367 30 -248us[ -248us] +/- 52ms 2026-03-07T10:18:06.663 INFO:teuthology.orchestra.run.vm01.stdout:^- static.119.109.140.128.c> 2 6 377 33 -20us[+5560ns] +/- 63ms 2026-03-07T10:18:06.675 INFO:teuthology.orchestra.run.vm04.stderr:bash: line 1: ntpq: command not found 2026-03-07T10:18:06.679 INFO:teuthology.orchestra.run.vm04.stdout:MS Name/IP address Stratum Poll Reach LastRx Last sample 2026-03-07T10:18:06.679 INFO:teuthology.orchestra.run.vm04.stdout:=============================================================================== 2026-03-07T10:18:06.679 INFO:teuthology.orchestra.run.vm04.stdout:^- alpha.rueckgr.at 2 6 377 30 +45us[ +45us] +/- 63ms 2026-03-07T10:18:06.679 INFO:teuthology.orchestra.run.vm04.stdout:^* ns1.blazing.de 3 6 377 31 +12us[ +39us] +/- 17ms 2026-03-07T10:18:06.679 INFO:teuthology.orchestra.run.vm04.stdout:^- server1b.meinberg.de 2 6 36 290 -339us[ -251us] +/- 48ms 
2026-03-07T10:18:06.679 INFO:teuthology.orchestra.run.vm04.stdout:^- static.119.109.140.128.c> 2 6 377 31 -118us[ -90us] +/- 63ms 2026-03-07T10:18:06.689 INFO:teuthology.orchestra.run.vm07.stderr:bash: line 1: ntpq: command not found 2026-03-07T10:18:06.692 INFO:teuthology.orchestra.run.vm07.stdout:MS Name/IP address Stratum Poll Reach LastRx Last sample 2026-03-07T10:18:06.692 INFO:teuthology.orchestra.run.vm07.stdout:=============================================================================== 2026-03-07T10:18:06.692 INFO:teuthology.orchestra.run.vm07.stdout:^- alpha.rueckgr.at 2 6 377 31 +102us[ +102us] +/- 63ms 2026-03-07T10:18:06.692 INFO:teuthology.orchestra.run.vm07.stdout:^* ns1.blazing.de 3 6 377 97 -14us[ -32us] +/- 17ms 2026-03-07T10:18:06.692 INFO:teuthology.orchestra.run.vm07.stdout:^- server1b.meinberg.de 2 6 74 291 -349us[ -267us] +/- 48ms 2026-03-07T10:18:06.692 INFO:teuthology.orchestra.run.vm07.stdout:^- static.119.109.140.128.c> 2 6 377 32 -136us[ -136us] +/- 63ms 2026-03-07T10:18:06.693 DEBUG:teuthology.run_tasks:Unwinding manager ansible.cephlab 2026-03-07T10:18:06.694 INFO:teuthology.task.ansible:Skipping ansible cleanup... 2026-03-07T10:18:06.695 DEBUG:teuthology.run_tasks:Unwinding manager selinux 2026-03-07T10:18:06.696 DEBUG:teuthology.run_tasks:Unwinding manager pcp 2026-03-07T10:18:06.698 DEBUG:teuthology.run_tasks:Unwinding manager internal.timer 2026-03-07T10:18:06.700 INFO:teuthology.task.internal:Duration was 704.138773 seconds 2026-03-07T10:18:06.701 DEBUG:teuthology.run_tasks:Unwinding manager internal.syslog 2026-03-07T10:18:06.703 INFO:teuthology.task.internal.syslog:Shutting down syslog monitoring... 2026-03-07T10:18:06.703 DEBUG:teuthology.orchestra.run.vm01:> sudo rm -f -- /etc/rsyslog.d/80-cephtest.conf && sudo service rsyslog restart 2026-03-07T10:18:06.706 DEBUG:teuthology.orchestra.run.vm04:> sudo rm -f -- /etc/rsyslog.d/80-cephtest.conf && sudo service rsyslog restart 2026-03-07T10:18:06.722 DEBUG:teuthology.orchestra.run.vm07:> sudo rm -f -- /etc/rsyslog.d/80-cephtest.conf && sudo service rsyslog restart 2026-03-07T10:18:06.744 INFO:teuthology.orchestra.run.vm01.stderr:Redirecting to /bin/systemctl restart rsyslog.service 2026-03-07T10:18:06.767 INFO:teuthology.orchestra.run.vm04.stderr:Redirecting to /bin/systemctl restart rsyslog.service 2026-03-07T10:18:06.774 INFO:teuthology.orchestra.run.vm07.stderr:Redirecting to /bin/systemctl restart rsyslog.service 2026-03-07T10:18:07.209 INFO:teuthology.task.internal.syslog:Checking logs for errors... 
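The final clock-skew check above falls back from ntpq to chronyc because these CentOS 9 Stream hosts run chrony rather than ntp, hence the "ntpq: command not found" lines followed by a chrony source table per host. In that table, "^*" marks the currently selected server and a Reach value of 377 means the last eight polls all succeeded. A minimal assertion in the same spirit (this check is an illustration, not part of the teuthology clock task):

    # pass only when chrony currently has a selected (synchronised) source, shown as '^*'
    chronyc sources | grep -q '^\^\*' || { echo "no synchronised NTP source"; exit 1; }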
2026-03-07T10:18:07.210 DEBUG:teuthology.task.internal.syslog:Checking ubuntu@vm01.local 2026-03-07T10:18:07.210 DEBUG:teuthology.orchestra.run.vm01:> grep -E --binary-files=text '\bBUG\b|\bINFO\b|\bDEADLOCK\b' /home/ubuntu/cephtest/archive/syslog/kern.log | grep -v 'task .* blocked for more than .* seconds' | grep -v 'lockdep is turned off' | grep -v 'trying to register non-static key' | grep -v 'DEBUG: fsize' | grep -v CRON | grep -v 'BUG: bad unlock balance detected' | grep -v 'inconsistent lock state' | grep -v '*** DEADLOCK ***' | grep -v 'INFO: possible irq lock inversion dependency detected' | grep -v 'INFO: NMI handler (perf_event_nmi_handler) took too long to run' | grep -v 'INFO: recovery required on readonly' | grep -v 'ceph-create-keys: INFO' | grep -v INFO:ceph-create-keys | grep -v 'Loaded datasource DataSourceOpenStack' | grep -v 'container-storage-setup: INFO: Volume group backing root filesystem could not be determined' | grep -E -v '\bsalt-master\b|\bsalt-minion\b|\bsalt-api\b' | grep -v ceph-crash | grep -E -v '\btcmu-runner\b.*\bINFO\b' | head -n 1 2026-03-07T10:18:07.234 DEBUG:teuthology.task.internal.syslog:Checking ubuntu@vm04.local 2026-03-07T10:18:07.234 DEBUG:teuthology.orchestra.run.vm04:> grep -E --binary-files=text '\bBUG\b|\bINFO\b|\bDEADLOCK\b' /home/ubuntu/cephtest/archive/syslog/kern.log | grep -v 'task .* blocked for more than .* seconds' | grep -v 'lockdep is turned off' | grep -v 'trying to register non-static key' | grep -v 'DEBUG: fsize' | grep -v CRON | grep -v 'BUG: bad unlock balance detected' | grep -v 'inconsistent lock state' | grep -v '*** DEADLOCK ***' | grep -v 'INFO: possible irq lock inversion dependency detected' | grep -v 'INFO: NMI handler (perf_event_nmi_handler) took too long to run' | grep -v 'INFO: recovery required on readonly' | grep -v 'ceph-create-keys: INFO' | grep -v INFO:ceph-create-keys | grep -v 'Loaded datasource DataSourceOpenStack' | grep -v 'container-storage-setup: INFO: Volume group backing root filesystem could not be determined' | grep -E -v '\bsalt-master\b|\bsalt-minion\b|\bsalt-api\b' | grep -v ceph-crash | grep -E -v '\btcmu-runner\b.*\bINFO\b' | head -n 1 2026-03-07T10:18:07.259 DEBUG:teuthology.task.internal.syslog:Checking ubuntu@vm07.local 2026-03-07T10:18:07.260 DEBUG:teuthology.orchestra.run.vm07:> grep -E --binary-files=text '\bBUG\b|\bINFO\b|\bDEADLOCK\b' /home/ubuntu/cephtest/archive/syslog/kern.log | grep -v 'task .* blocked for more than .* seconds' | grep -v 'lockdep is turned off' | grep -v 'trying to register non-static key' | grep -v 'DEBUG: fsize' | grep -v CRON | grep -v 'BUG: bad unlock balance detected' | grep -v 'inconsistent lock state' | grep -v '*** DEADLOCK ***' | grep -v 'INFO: possible irq lock inversion dependency detected' | grep -v 'INFO: NMI handler (perf_event_nmi_handler) took too long to run' | grep -v 'INFO: recovery required on readonly' | grep -v 'ceph-create-keys: INFO' | grep -v INFO:ceph-create-keys | grep -v 'Loaded datasource DataSourceOpenStack' | grep -v 'container-storage-setup: INFO: Volume group backing root filesystem could not be determined' | grep -E -v '\bsalt-master\b|\bsalt-minion\b|\bsalt-api\b' | grep -v ceph-crash | grep -E -v '\btcmu-runner\b.*\bINFO\b' | head -n 1 2026-03-07T10:18:07.284 INFO:teuthology.task.internal.syslog:Gathering journactl... 
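The per-host "Checking logs for errors" step above scans the captured kern.log for BUG/INFO/DEADLOCK markers and then strips a long list of known-benign matches; the check is clean when nothing survives the filters, and head -n 1 keeps at most one offending line for the failure report. Reduced to its shape, keeping only two of the many exclusions from the real command, purely for illustration:

    KERN=/home/ubuntu/cephtest/archive/syslog/kern.log
    hit=$(grep -E --binary-files=text '\bBUG\b|\bINFO\b|\bDEADLOCK\b' "$KERN" \
            | grep -v 'task .* blocked for more than .* seconds' \
            | grep -v CRON \
            | head -n 1)
    [ -z "$hit" ] || { echo "suspicious kernel log line: $hit"; exit 1; }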
2026-03-07T10:18:07.284 DEBUG:teuthology.orchestra.run.vm01:> sudo journalctl > /home/ubuntu/cephtest/archive/syslog/journalctl.log 2026-03-07T10:18:07.285 DEBUG:teuthology.orchestra.run.vm04:> sudo journalctl > /home/ubuntu/cephtest/archive/syslog/journalctl.log 2026-03-07T10:18:07.302 DEBUG:teuthology.orchestra.run.vm07:> sudo journalctl > /home/ubuntu/cephtest/archive/syslog/journalctl.log 2026-03-07T10:18:08.212 INFO:teuthology.task.internal.syslog:Compressing syslogs... 2026-03-07T10:18:08.212 DEBUG:teuthology.orchestra.run.vm01:> find /home/ubuntu/cephtest/archive/syslog -name '*.log' -print0 | sudo xargs -0 --max-args=1 --max-procs=0 --verbose --no-run-if-empty -- gzip -5 --verbose -- 2026-03-07T10:18:08.214 DEBUG:teuthology.orchestra.run.vm04:> find /home/ubuntu/cephtest/archive/syslog -name '*.log' -print0 | sudo xargs -0 --max-args=1 --max-procs=0 --verbose --no-run-if-empty -- gzip -5 --verbose -- 2026-03-07T10:18:08.215 DEBUG:teuthology.orchestra.run.vm07:> find /home/ubuntu/cephtest/archive/syslog -name '*.log' -print0 | sudo xargs -0 --max-args=1 --max-procs=0 --verbose --no-run-if-empty -- gzip -5 --verbose -- 2026-03-07T10:18:08.237 INFO:teuthology.orchestra.run.vm01.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/kern.log 2026-03-07T10:18:08.238 INFO:teuthology.orchestra.run.vm01.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/misc.log 2026-03-07T10:18:08.238 INFO:teuthology.orchestra.run.vm01.stderr:/home/ubuntu/cephtest/archive/syslog/kern.log: gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/journalctl.log 2026-03-07T10:18:08.238 INFO:teuthology.orchestra.run.vm01.stderr: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/kern.log.gz 2026-03-07T10:18:08.238 INFO:teuthology.orchestra.run.vm01.stderr:/home/ubuntu/cephtest/archive/syslog/misc.log: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/misc.log.gz 2026-03-07T10:18:08.240 INFO:teuthology.orchestra.run.vm04.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/kern.log 2026-03-07T10:18:08.240 INFO:teuthology.orchestra.run.vm04.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/misc.log 2026-03-07T10:18:08.240 INFO:teuthology.orchestra.run.vm04.stderr:/home/ubuntu/cephtest/archive/syslog/kern.log: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/kern.log.gz 2026-03-07T10:18:08.241 INFO:teuthology.orchestra.run.vm04.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/journalctl.log 2026-03-07T10:18:08.241 INFO:teuthology.orchestra.run.vm07.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/kern.log 2026-03-07T10:18:08.241 INFO:teuthology.orchestra.run.vm07.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/misc.log 2026-03-07T10:18:08.241 INFO:teuthology.orchestra.run.vm04.stderr:/home/ubuntu/cephtest/archive/syslog/misc.log: /home/ubuntu/cephtest/archive/syslog/journalctl.log: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/misc.log.gz 2026-03-07T10:18:08.241 INFO:teuthology.orchestra.run.vm07.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/journalctl.log 2026-03-07T10:18:08.241 INFO:teuthology.orchestra.run.vm07.stderr:/home/ubuntu/cephtest/archive/syslog/kern.log: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/kern.log.gz 2026-03-07T10:18:08.241 INFO:teuthology.orchestra.run.vm07.stderr:/home/ubuntu/cephtest/archive/syslog/misc.log: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/misc.log.gz 2026-03-07T10:18:08.373 
INFO:teuthology.orchestra.run.vm04.stderr: 98.3% -- replaced with /home/ubuntu/cephtest/archive/syslog/journalctl.log.gz 2026-03-07T10:18:08.384 INFO:teuthology.orchestra.run.vm01.stderr:/home/ubuntu/cephtest/archive/syslog/journalctl.log: 98.2% -- replaced with /home/ubuntu/cephtest/archive/syslog/journalctl.log.gz 2026-03-07T10:18:08.558 INFO:teuthology.orchestra.run.vm07.stderr:/home/ubuntu/cephtest/archive/syslog/journalctl.log: 98.5% -- replaced with /home/ubuntu/cephtest/archive/syslog/journalctl.log.gz 2026-03-07T10:18:08.560 DEBUG:teuthology.run_tasks:Unwinding manager internal.sudo 2026-03-07T10:18:08.562 INFO:teuthology.task.internal:Restoring /etc/sudoers... 2026-03-07T10:18:08.562 DEBUG:teuthology.orchestra.run.vm01:> sudo mv -f /etc/sudoers.orig.teuthology /etc/sudoers 2026-03-07T10:18:08.587 DEBUG:teuthology.orchestra.run.vm04:> sudo mv -f /etc/sudoers.orig.teuthology /etc/sudoers 2026-03-07T10:18:08.613 DEBUG:teuthology.orchestra.run.vm07:> sudo mv -f /etc/sudoers.orig.teuthology /etc/sudoers 2026-03-07T10:18:08.637 DEBUG:teuthology.run_tasks:Unwinding manager internal.coredump 2026-03-07T10:18:08.639 DEBUG:teuthology.orchestra.run.vm01:> sudo sysctl -w kernel.core_pattern=core && sudo bash -c 'for f in `find /home/ubuntu/cephtest/archive/coredump -type f`; do file $f | grep -q systemd-sysusers && rm $f || true ; done' && rmdir --ignore-fail-on-non-empty -- /home/ubuntu/cephtest/archive/coredump 2026-03-07T10:18:08.641 DEBUG:teuthology.orchestra.run.vm04:> sudo sysctl -w kernel.core_pattern=core && sudo bash -c 'for f in `find /home/ubuntu/cephtest/archive/coredump -type f`; do file $f | grep -q systemd-sysusers && rm $f || true ; done' && rmdir --ignore-fail-on-non-empty -- /home/ubuntu/cephtest/archive/coredump 2026-03-07T10:18:08.656 DEBUG:teuthology.orchestra.run.vm07:> sudo sysctl -w kernel.core_pattern=core && sudo bash -c 'for f in `find /home/ubuntu/cephtest/archive/coredump -type f`; do file $f | grep -q systemd-sysusers && rm $f || true ; done' && rmdir --ignore-fail-on-non-empty -- /home/ubuntu/cephtest/archive/coredump 2026-03-07T10:18:08.663 INFO:teuthology.orchestra.run.vm01.stdout:kernel.core_pattern = core 2026-03-07T10:18:08.679 INFO:teuthology.orchestra.run.vm04.stdout:kernel.core_pattern = core 2026-03-07T10:18:08.702 INFO:teuthology.orchestra.run.vm07.stdout:kernel.core_pattern = core 2026-03-07T10:18:08.715 DEBUG:teuthology.orchestra.run.vm01:> test -e /home/ubuntu/cephtest/archive/coredump 2026-03-07T10:18:08.736 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-07T10:18:08.737 DEBUG:teuthology.orchestra.run.vm04:> test -e /home/ubuntu/cephtest/archive/coredump 2026-03-07T10:18:08.751 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-07T10:18:08.752 DEBUG:teuthology.orchestra.run.vm07:> test -e /home/ubuntu/cephtest/archive/coredump 2026-03-07T10:18:08.772 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-07T10:18:08.773 DEBUG:teuthology.run_tasks:Unwinding manager internal.archive 2026-03-07T10:18:08.775 INFO:teuthology.task.internal:Transferring archived files... 2026-03-07T10:18:08.775 DEBUG:teuthology.misc:Transferring archived files from vm01:/home/ubuntu/cephtest/archive to /archive/irq0-2026-03-07_10:02:54-orch:cephadm:workunits-cobaltcore-storage-v19.2.3-fasttrack-5-none-default-vps/10/remote/vm01 2026-03-07T10:18:08.775 DEBUG:teuthology.orchestra.run.vm01:> sudo tar c -f - -C /home/ubuntu/cephtest/archive -- . 
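Unwinding internal.coredump above restores the default kernel.core_pattern, prunes any cores attributed to systemd-sysusers, and removes the coredump directory only if it is empty; the "test -e .../coredump" result of 1 on all three hosts is the success path, confirming no core dumps were left behind. Condensed from the commands in the log (the 2>/dev/null guard is an illustrative addition):

    CORES=/home/ubuntu/cephtest/archive/coredump
    sudo sysctl -w kernel.core_pattern=core
    # discard any cores produced by systemd-sysusers, keep everything else
    for f in $(sudo find "$CORES" -type f 2>/dev/null); do
        sudo file "$f" | grep -q systemd-sysusers && sudo rm -- "$f" || true
    done
    sudo rmdir --ignore-fail-on-non-empty -- "$CORES"
    # exit status 1 here (directory gone) is the success case: no core dumps were produced
    test -e "$CORES"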
2026-03-07T10:18:08.805 DEBUG:teuthology.misc:Transferring archived files from vm04:/home/ubuntu/cephtest/archive to /archive/irq0-2026-03-07_10:02:54-orch:cephadm:workunits-cobaltcore-storage-v19.2.3-fasttrack-5-none-default-vps/10/remote/vm04 2026-03-07T10:18:08.806 DEBUG:teuthology.orchestra.run.vm04:> sudo tar c -f - -C /home/ubuntu/cephtest/archive -- . 2026-03-07T10:18:08.837 DEBUG:teuthology.misc:Transferring archived files from vm07:/home/ubuntu/cephtest/archive to /archive/irq0-2026-03-07_10:02:54-orch:cephadm:workunits-cobaltcore-storage-v19.2.3-fasttrack-5-none-default-vps/10/remote/vm07 2026-03-07T10:18:08.837 DEBUG:teuthology.orchestra.run.vm07:> sudo tar c -f - -C /home/ubuntu/cephtest/archive -- . 2026-03-07T10:18:08.869 INFO:teuthology.task.internal:Removing archive directory... 2026-03-07T10:18:08.869 DEBUG:teuthology.orchestra.run.vm01:> rm -rf -- /home/ubuntu/cephtest/archive 2026-03-07T10:18:08.870 DEBUG:teuthology.orchestra.run.vm04:> rm -rf -- /home/ubuntu/cephtest/archive 2026-03-07T10:18:08.879 DEBUG:teuthology.orchestra.run.vm07:> rm -rf -- /home/ubuntu/cephtest/archive 2026-03-07T10:18:08.924 DEBUG:teuthology.run_tasks:Unwinding manager internal.archive_upload 2026-03-07T10:18:08.927 INFO:teuthology.task.internal:Not uploading archives. 2026-03-07T10:18:08.927 DEBUG:teuthology.run_tasks:Unwinding manager internal.base 2026-03-07T10:18:08.929 INFO:teuthology.task.internal:Tidying up after the test... 2026-03-07T10:18:08.929 DEBUG:teuthology.orchestra.run.vm01:> find /home/ubuntu/cephtest -ls ; rmdir -- /home/ubuntu/cephtest 2026-03-07T10:18:08.931 DEBUG:teuthology.orchestra.run.vm04:> find /home/ubuntu/cephtest -ls ; rmdir -- /home/ubuntu/cephtest 2026-03-07T10:18:08.935 DEBUG:teuthology.orchestra.run.vm07:> find /home/ubuntu/cephtest -ls ; rmdir -- /home/ubuntu/cephtest 2026-03-07T10:18:08.945 INFO:teuthology.orchestra.run.vm01.stdout: 8532138 0 drwxr-xr-x 2 ubuntu ubuntu 6 Mar 7 10:18 /home/ubuntu/cephtest 2026-03-07T10:18:08.953 INFO:teuthology.orchestra.run.vm04.stdout: 8532145 0 drwxr-xr-x 2 ubuntu ubuntu 6 Mar 7 10:18 /home/ubuntu/cephtest 2026-03-07T10:18:08.983 INFO:teuthology.orchestra.run.vm07.stdout: 8532145 0 drwxr-xr-x 2 ubuntu ubuntu 6 Mar 7 10:18 /home/ubuntu/cephtest 2026-03-07T10:18:08.984 DEBUG:teuthology.run_tasks:Unwinding manager console_log 2026-03-07T10:18:08.989 INFO:teuthology.run:Summary data: description: orch:cephadm:workunits/{0-distro/centos_9.stream agent/on mon_election/connectivity task/test_host_drain} duration: 704.1387732028961 owner: irq0 success: true 2026-03-07T10:18:08.989 DEBUG:teuthology.report:Pushing job info to http://localhost:8080 2026-03-07T10:18:09.012 INFO:teuthology.run:pass
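The tidy-up step near the end is deliberately strict: it lists whatever is left under the per-test directory and then removes it with a plain rmdir, so anything a task left behind would make the rmdir fail and surface as a job error. Here each host shows only the empty cephtest directory itself, and the run closes with the summary pushed to the results server: duration about 704 seconds, success true, overall result pass. The tidy-up pair, as run on each host, with comments added:

    # list anything a task left behind, then remove the (expected-empty) test directory;
    # a non-empty directory makes the rmdir fail, surfacing leftovers as a job error
    find /home/ubuntu/cephtest -ls ; rmdir -- /home/ubuntu/cephtest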